mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-29 08:21:15 +03:00

Reformat all code to coding standard

Andrew Hutchings, 2017-10-26 17:18:17 +01:00
parent 4985f3456e
commit 01446d1e22
1296 changed files with 403852 additions and 353747 deletions
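
The hunks below are whitespace and brace reformatting only; the logic is unchanged. As a rough illustration of the standard the commit converges on (a toy function, not code from this tree): a space after control keywords, spaces around operators in loop headers, and opening braces on their own line.

// Illustrative sketch only -- not code from this repository. The comments
// show the pre-commit compact forms that the diff below replaces.
#include <iostream>
#include <vector>

int sumPositive( const std::vector<int>& vals )
{
    int total = 0;

    // old style: for (unsigned k=0; k<vals.size(); k++) {
    for ( unsigned k = 0; k < vals.size(); k++ )
    {
        // old style: if( vals[k] > 0 )
        if ( vals[k] > 0 )
            total += vals[k];
    }

    return total;
}

int main()
{
    std::vector<int> v;
    v.push_back( 3 );
    v.push_back( -1 );
    v.push_back( 4 );
    std::cout << sumPositive( v ) << std::endl;   // prints 7
    return 0;
}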

View File

@ -14,56 +14,61 @@ using namespace WriteEngine;
void test()
{
std::cout << "getDBRootByIdx(1): " <<
Config::getDBRootByIdx(1) << std::endl;
Config::getDBRootByIdx(1) << std::endl;
std::cout << "getDBRootByIdx(3): " <<
Config::getDBRootByIdx(3) << std::endl;
Config::getDBRootByIdx(3) << std::endl;
std::cout << "getDBRootByNum(1): " <<
Config::getDBRootByNum(1) << std::endl;
Config::getDBRootByNum(1) << std::endl;
std::cout << "getDBRootByNum(3): " <<
Config::getDBRootByNum(3) << std::endl;
Config::getDBRootByNum(3) << std::endl;
std::vector<unsigned short> dbRootIds;
Config::getRootIdList( dbRootIds );
std::cout << "getRootIdList: ";
for (unsigned k=0; k<dbRootIds.size(); k++) {
std::cout << dbRootIds[k] << ' '; }
for (unsigned k = 0; k < dbRootIds.size(); k++)
{
std::cout << dbRootIds[k] << ' ';
}
std::cout << std::endl;
std::vector<std::string> dbRootPathList;
Config::getDBRootPathList( dbRootPathList );
std::cout << "getDBRootPathList: " << std::endl;
for (unsigned k=0; k<dbRootPathList.size(); k++)
for (unsigned k = 0; k < dbRootPathList.size(); k++)
std::cout << " " << k << ". " << dbRootPathList[k] << std::endl;
std::cout << "getBulkRoot(): " <<
Config::getBulkRoot() << std::endl;
Config::getBulkRoot() << std::endl;
std::cout << "DBRootCount(): " <<
Config::DBRootCount() << std::endl;
Config::DBRootCount() << std::endl;
std::cout << "totalDBRootCount(): " <<
Config::totalDBRootCount() << std::endl;
Config::totalDBRootCount() << std::endl;
std::cout << "getWaitPeriod(): " <<
Config::getWaitPeriod() << std::endl;
Config::getWaitPeriod() << std::endl;
std::cout << "getFilePerColumnPartition(): " <<
Config::getFilesPerColumnPartition() << std::endl;
Config::getFilesPerColumnPartition() << std::endl;
std::cout << "getExtentsPerSegmentFile(): " <<
Config::getExtentsPerSegmentFile() << std::endl;
Config::getExtentsPerSegmentFile() << std::endl;
std::cout << "getBulkProcessPriority(): " <<
Config::getBulkProcessPriority() << std::endl;
Config::getBulkProcessPriority() << std::endl;
std::cout << "getBulkRollbackDir(): " <<
Config::getBulkRollbackDir() << std::endl;
Config::getBulkRollbackDir() << std::endl;
std::cout << "getMaxFileSystemDiskUsage(): " <<
Config::getMaxFileSystemDiskUsage() << std::endl;
Config::getMaxFileSystemDiskUsage() << std::endl;
std::cout << "getNumCompressedPadBlks(): " <<
Config::getNumCompressedPadBlks() << std::endl;
Config::getNumCompressedPadBlks() << std::endl;
std::cout << "getParentOAMModuleFlag(): " <<
Config::getParentOAMModuleFlag() << std::endl;
Config::getParentOAMModuleFlag() << std::endl;
std::cout << "getLocalModuleType(): " <<
Config::getLocalModuleType() << std::endl;
Config::getLocalModuleType() << std::endl;
std::cout << "getLocalModuleID(): " <<
Config::getLocalModuleID() << std::endl;
Config::getLocalModuleID() << std::endl;
std::cout << "getVBRoot(): " <<
Config::getVBRoot() << std::endl;
Config::getVBRoot() << std::endl;
}
int main()
@ -73,6 +78,7 @@ int main()
int nTest = 0;
std::cout << std::endl;
while (1)
{
std::cout << "test" << nTest << "..." << std::endl;
@ -80,15 +86,17 @@ int main()
std::cout << "Pause..." << std::endl;
std::cin >> resp;
std::cout << std::endl;
if (resp == 'c')
{
std::cout << "Has local DBRootList changed: " <<
(bool)Config::hasLocalDBRootListChanged() << std::endl;
(bool)Config::hasLocalDBRootListChanged() << std::endl;
}
else if (resp == 'q')
{
break;
}
nTest++;
}

File diff suppressed because it is too large

View File

@ -64,11 +64,11 @@ BlockOp::~BlockOp()
bool BlockOp::calculateRowId(
RID rowId, const int epb, const int width, int& fbo, int& bio ) const
{
if( std::numeric_limits<WriteEngine::RID>::max() == rowId )
if ( std::numeric_limits<WriteEngine::RID>::max() == rowId )
return false;
fbo = (int)( rowId/epb );
bio = ( rowId & ( epb - 1 )) * width;
fbo = (int)( rowId / epb );
bio = ( rowId & ( epb - 1 )) * width;
return true;
}
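
calculateRowId() above maps a row id to a file block offset (fbo) and a byte offset inside that block (bio): fbo = rowId / epb and bio = (rowId % epb) * width, where the modulo is written as rowId & (epb - 1) and therefore relies on epb being a power of two. A minimal standalone sketch of that arithmetic, with made-up inputs:

#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t rowId = 100000;  // hypothetical row id
    const int epb = 8192;           // entries (rows) per block; power of two
    const int width = 4;            // 4-byte column

    // Same arithmetic as calculateRowId(): block index, then byte offset.
    int fbo = static_cast<int>( rowId / epb );
    int bio = static_cast<int>( ( rowId & ( epb - 1 ) ) * width );

    std::cout << "fbo=" << fbo << " bio=" << bio << std::endl;  // fbo=12 bio=6784
    return 0;
}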
@ -88,61 +88,94 @@ uint64_t BlockOp::getEmptyRowValue(
uint64_t emptyVal = 0;
int offset = 0;
switch( colDataType ) {
case CalpontSystemCatalog::TINYINT : emptyVal = joblist::TINYINTEMPTYROW; break;
case CalpontSystemCatalog::SMALLINT: emptyVal = joblist::SMALLINTEMPTYROW; break;
case CalpontSystemCatalog::MEDINT :
case CalpontSystemCatalog::INT : emptyVal = joblist::INTEMPTYROW; break;
case CalpontSystemCatalog::BIGINT : emptyVal = joblist::BIGINTEMPTYROW; break;
case CalpontSystemCatalog::FLOAT :
case CalpontSystemCatalog::UFLOAT : emptyVal = joblist::FLOATEMPTYROW; break;
case CalpontSystemCatalog::DOUBLE :
case CalpontSystemCatalog::UDOUBLE : emptyVal = joblist::DOUBLEEMPTYROW; break;
case CalpontSystemCatalog::DECIMAL :
case CalpontSystemCatalog::UDECIMAL :
/* if( width <= 4 )
emptyVal = joblist::SMALLINTEMPTYROW;
else
if( width <= 9 )
emptyVal = 0x80000001;
else
if( width <= 18 )
emptyVal = 0x8000000000000001LL;
else
emptyVal = 0xFFFFFFFFFFFFFFFFLL;
*/
// @bug 194 use the correct logic in handling empty value for decimal
if (width <= 1)
emptyVal = joblist::TINYINTEMPTYROW;
else if( width <= 2 )
emptyVal = joblist::SMALLINTEMPTYROW;
else if( width <= 4 )
emptyVal = joblist::INTEMPTYROW;
else
emptyVal = joblist::BIGINTEMPTYROW;
break;
case CalpontSystemCatalog::UTINYINT : emptyVal = joblist::UTINYINTEMPTYROW; break;
case CalpontSystemCatalog::USMALLINT: emptyVal = joblist::USMALLINTEMPTYROW; break;
case CalpontSystemCatalog::UMEDINT :
case CalpontSystemCatalog::UINT : emptyVal = joblist::UINTEMPTYROW; break;
case CalpontSystemCatalog::UBIGINT : emptyVal = joblist::UBIGINTEMPTYROW; break;
switch ( colDataType )
{
case CalpontSystemCatalog::TINYINT :
emptyVal = joblist::TINYINTEMPTYROW;
break;
case CalpontSystemCatalog::CHAR :
case CalpontSystemCatalog::VARCHAR :
case CalpontSystemCatalog::SMALLINT:
emptyVal = joblist::SMALLINTEMPTYROW;
break;
case CalpontSystemCatalog::MEDINT :
case CalpontSystemCatalog::INT :
emptyVal = joblist::INTEMPTYROW;
break;
case CalpontSystemCatalog::BIGINT :
emptyVal = joblist::BIGINTEMPTYROW;
break;
case CalpontSystemCatalog::FLOAT :
case CalpontSystemCatalog::UFLOAT :
emptyVal = joblist::FLOATEMPTYROW;
break;
case CalpontSystemCatalog::DOUBLE :
case CalpontSystemCatalog::UDOUBLE :
emptyVal = joblist::DOUBLEEMPTYROW;
break;
case CalpontSystemCatalog::DECIMAL :
case CalpontSystemCatalog::UDECIMAL :
/* if( width <= 4 )
emptyVal = joblist::SMALLINTEMPTYROW;
else
if( width <= 9 )
emptyVal = 0x80000001;
else
if( width <= 18 )
emptyVal = 0x8000000000000001LL;
else
emptyVal = 0xFFFFFFFFFFFFFFFFLL;
*/
// @bug 194 use the correct logic in handling empty value for decimal
if (width <= 1)
emptyVal = joblist::TINYINTEMPTYROW;
else if ( width <= 2 )
emptyVal = joblist::SMALLINTEMPTYROW;
else if ( width <= 4 )
emptyVal = joblist::INTEMPTYROW;
else
emptyVal = joblist::BIGINTEMPTYROW;
break;
case CalpontSystemCatalog::UTINYINT :
emptyVal = joblist::UTINYINTEMPTYROW;
break;
case CalpontSystemCatalog::USMALLINT:
emptyVal = joblist::USMALLINTEMPTYROW;
break;
case CalpontSystemCatalog::UMEDINT :
case CalpontSystemCatalog::UINT :
emptyVal = joblist::UINTEMPTYROW;
break;
case CalpontSystemCatalog::UBIGINT :
emptyVal = joblist::UBIGINTEMPTYROW;
break;
case CalpontSystemCatalog::CHAR :
case CalpontSystemCatalog::VARCHAR :
case CalpontSystemCatalog::DATE :
case CalpontSystemCatalog::DATETIME :
default:
offset = ( colDataType == CalpontSystemCatalog::VARCHAR )? -1 : 0;
offset = ( colDataType == CalpontSystemCatalog::VARCHAR ) ? -1 : 0;
emptyVal = joblist::CHAR1EMPTYROW;
if( width == (2 + offset) )
emptyVal = joblist::CHAR2EMPTYROW;
else
if( width >= (3 + offset) && width <= ( 4 + offset ) )
emptyVal = joblist::CHAR4EMPTYROW;
else
if( width >= (5 + offset) )
emptyVal = joblist::CHAR8EMPTYROW;
break;
if ( width == (2 + offset) )
emptyVal = joblist::CHAR2EMPTYROW;
else if ( width >= (3 + offset) && width <= ( 4 + offset ) )
emptyVal = joblist::CHAR4EMPTYROW;
else if ( width >= (5 + offset) )
emptyVal = joblist::CHAR8EMPTYROW;
break;
}
return emptyVal;
@ -178,7 +211,7 @@ RID BlockOp::getRowId(
/*const int bio, const int bbo*/ ) const
{
// return fbo*BYTE_PER_BLOCK*ROW_PER_BYTE + bio*ROW_PER_BYTE + bbo;
return (BYTE_PER_BLOCK/width) * fbo + rowPos;
return (BYTE_PER_BLOCK / width) * fbo + rowPos;
}
/***********************************************************
@ -228,34 +261,34 @@ void BlockOp::setEmptyBuf(
const int ARRAY_COUNT = 128;
const int NBYTES_IN_ARRAY = width * ARRAY_COUNT;
//unsigned char emptyValArray[NBYTES_IN_ARRAY];
unsigned char* emptyValArray = (unsigned char*)alloca(NBYTES_IN_ARRAY);
unsigned char* emptyValArray = (unsigned char*)alloca(NBYTES_IN_ARRAY);
// Optimize buffer initialization by constructing and copying in an array
// instead of individual values. This reduces the number of calls to
// memcpy().
for (int j=0; j<ARRAY_COUNT; j++)
for (int j = 0; j < ARRAY_COUNT; j++)
{
memcpy(emptyValArray+(j*width), &emptyVal, width);
memcpy(emptyValArray + (j * width), &emptyVal, width);
}
int countFull128 = (bufSize/width) / ARRAY_COUNT;
int countRemain = (bufSize/width) % ARRAY_COUNT;
int countFull128 = (bufSize / width) / ARRAY_COUNT;
int countRemain = (bufSize / width) % ARRAY_COUNT;
// Copy in the 128 element array into "buf" as many times as needed
if (countFull128 > 0)
{
for( int i = 0; i < countFull128; i++ )
for ( int i = 0; i < countFull128; i++ )
memcpy( buf + (i * (NBYTES_IN_ARRAY)),
emptyValArray,
NBYTES_IN_ARRAY );
emptyValArray,
NBYTES_IN_ARRAY );
}
// Initialize the remainder of "buf" that is leftover
if (countRemain > 0)
{
memcpy( buf + (countFull128 * NBYTES_IN_ARRAY),
emptyValArray,
width*countRemain );
emptyValArray,
width * countRemain );
}
}
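
The comment in setEmptyBuf() above describes the fill strategy: build one array holding 128 copies of the empty value, then memcpy that pattern into the buffer in large chunks rather than once per value. A self-contained sketch of the same idea, using a hypothetical helper fillWithEmptyValue() that mirrors the structure of setEmptyBuf(); the 8 KB block size and the empty value are assumptions for the example, and the engine uses alloca() where this uses a std::vector:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

static void fillWithEmptyValue( unsigned char* buf, int bufSize,
                                uint64_t emptyVal, int width )
{
    const int ARRAY_COUNT = 128;
    const int NBYTES_IN_ARRAY = width * ARRAY_COUNT;
    std::vector<unsigned char> pattern( NBYTES_IN_ARRAY );

    // Build one array of 128 empty values...
    for ( int j = 0; j < ARRAY_COUNT; j++ )
        memcpy( &pattern[j * width], &emptyVal, width );

    int countFull = ( bufSize / width ) / ARRAY_COUNT;
    int countRemain = ( bufSize / width ) % ARRAY_COUNT;

    // ...then copy it in as many times as it fits...
    for ( int i = 0; i < countFull; i++ )
        memcpy( buf + ( i * NBYTES_IN_ARRAY ), &pattern[0], NBYTES_IN_ARRAY );

    // ...and finish off the leftover values.
    if ( countRemain > 0 )
        memcpy( buf + ( countFull * NBYTES_IN_ARRAY ), &pattern[0],
                width * countRemain );
}

int main()
{
    std::vector<unsigned char> block( 8192 );   // one assumed 8 KB block
    fillWithEmptyValue( &block[0], 8192, 0x80000001ULL, 4 );

    // Inspect a couple of bytes of the filled buffer.
    std::cout << std::hex << (int)block[0] << " " << (int)block[3] << std::endl;
    return 0;
}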
@ -272,7 +305,7 @@ void BlockOp::setEmptyBuf(
void BlockOp::writeBufValue(
unsigned char* buf, void* val, const size_t width, const bool clear ) const
{
if( clear )
if ( clear )
memset( buf, 0, width );
memcpy( buf, val, width );

View File

@ -40,83 +40,87 @@ namespace WriteEngine
class BlockOp : public WEObj
{
public:
/**
* @brief Constructor
*/
/**
* @brief Constructor
*/
EXPORT BlockOp();
/**
* @brief Default Destructor
*/
/**
* @brief Default Destructor
*/
EXPORT ~BlockOp();
/**
* @brief Calculate the location of Row ID
*/
/**
* @brief Calculate the location of Row ID
*/
EXPORT bool calculateRowId( RID rowId,
const int epb,
const int width,
int& fbo,
int& bio ) const;
/**
* @brief Calculate the location of Row ID
*/
/**
* @brief Calculate the location of Row ID
*/
void clearBlock( DataBlock* block )
{ memset(block->data, 0, sizeof(block->data));
block->no = -1;
block->dirty = false; }
{
memset(block->data, 0, sizeof(block->data));
block->no = -1;
block->dirty = false;
}
/**
* @brief Get bit value after shift
*/
/**
* @brief Get bit value after shift
*/
uint64_t getBitValue( uint64_t val,
int shiftBit,
uint64_t mask ) const
{ return ( val >> shiftBit ) & mask ; }
{
return ( val >> shiftBit ) & mask ;
}
/**
* @brief Get correct row width
*/
/**
* @brief Get correct row width
*/
EXPORT int getCorrectRowWidth( const execplan::CalpontSystemCatalog::ColDataType colDataType,
const int width ) const;
const int width ) const;
/**
* @brief Get an empty row value
*/
/**
* @brief Get an empty row value
*/
EXPORT uint64_t getEmptyRowValue(const execplan::CalpontSystemCatalog::ColDataType colDataType,
const int width ) const;
const int width ) const;
/**
* @brief Calculate row id
*/
/**
* @brief Calculate row id
*/
EXPORT RID getRowId( const long fbo,
const int width,
const int rowPos ) const;
/**
* @brief Get buffer value
*/
/**
* @brief Get buffer value
*/
EXPORT void readBufValue( const unsigned char* buf,
void* val, const short width ) const;
/**
* @brief Reset a buffer
*/
/**
* @brief Reset a buffer
*/
EXPORT void resetBuf( unsigned char* buf,
const int bufSize ) const;
/**
* @brief Fill buffer with empty values
*/
/**
* @brief Fill buffer with empty values
*/
EXPORT void static setEmptyBuf( unsigned char* buf,
const int bufSize,
uint64_t emptyVal, const int width );
/**
* @brief Set a value in a buffer
*/
/**
* @brief Set a value in a buffer
*/
EXPORT void writeBufValue( unsigned char* buf,
void* val,
const size_t width,

File diff suppressed because it is too large

View File

@ -60,10 +60,10 @@ public:
* @param errMsg Applicable error message.
*/
EXPORT int startAutoIncrementSequence( OID colOID,
uint64_t startNextValue,
uint32_t colWidth,
execplan::CalpontSystemCatalog::ColDataType colDataType,
std::string& errMsg);
uint64_t startNextValue,
uint32_t colWidth,
execplan::CalpontSystemCatalog::ColDataType colDataType,
std::string& errMsg);
/**
* @brief Reserve a range of Auto Increment numbers for the specified OID
@ -73,9 +73,9 @@ public:
* @param errMsg Applicable error message.
*/
EXPORT int getAutoIncrementRange( OID colOID,
uint64_t count,
uint64_t& firstNum,
std::string& errMsg);
uint64_t count,
uint64_t& firstNum,
std::string& errMsg);
/**
* @brief Inform BRM to add an extent to each of the requested OIDs at
@ -89,41 +89,41 @@ public:
* @param extents (out) List of lbids, numBlks, and fbo for new extents
*/
EXPORT int allocateStripeColExtents(
const std::vector<BRM::CreateStripeColumnExtentsArgIn>& cols,
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segmentNum,
std::vector<BRM::CreateStripeColumnExtentsArgOut>& extents);
const std::vector<BRM::CreateStripeColumnExtentsArgIn>& cols,
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segmentNum,
std::vector<BRM::CreateStripeColumnExtentsArgOut>& extents);
/**
* @brief Inform BRM to add extent to the exact segment file specified by
* OID, DBRoot, partition, and segment.
*/
EXPORT int allocateColExtentExactFile( const OID oid,
const uint32_t colWidth,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
execplan::CalpontSystemCatalog::ColDataType colDataType,
BRM::LBID_t& startLbid,
int& allocSize,
uint32_t& startBlock);
const uint32_t colWidth,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
execplan::CalpontSystemCatalog::ColDataType colDataType,
BRM::LBID_t& startLbid,
int& allocSize,
uint32_t& startBlock);
/**
* @brief Inform BRM to add a dictionary store extent to the specified OID
*/
EXPORT int allocateDictStoreExtent( const OID oid,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::LBID_t& startLbid,
int& allocSize );
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::LBID_t& startLbid,
int& allocSize );
/**
* @brief Inform BRM to delete certain oid
*/
EXPORT int deleteOid( const OID oid );
/**
* @brief Inform BRM to delete list of oids
*/
@ -134,50 +134,50 @@ public:
* and segment
*/
EXPORT int getBrmInfo( const OID oid,
const uint32_t partition,
const uint16_t segment,
const int fbo,
BRM::LBID_t& lbid );
const uint32_t partition,
const uint16_t segment,
const int fbo,
BRM::LBID_t& lbid );
/**
* @brief Get starting LBID from BRM for a specfic OID, DBRoot, partition,
* segment, and block offset.
*/
EXPORT int getStartLbid( const OID oid,
const uint32_t partition,
const uint16_t segment,
const int fbo,
BRM::LBID_t& startLbid );
const uint32_t partition,
const uint16_t segment,
const int fbo,
BRM::LBID_t& startLbid );
/**
* @brief Get the real physical offset based on the LBID
*/
EXPORT int getFboOffset( const uint64_t lbid,
uint16_t& dbRoot,
uint32_t& partition,
uint16_t& segment,
int& fbo );
EXPORT int getFboOffset( const uint64_t lbid,
uint16_t& dbRoot,
uint32_t& partition,
uint16_t& segment,
int& fbo );
EXPORT int getFboOffset( const uint64_t lbid, int& oid,
uint16_t& dbRoot,
uint32_t& partition,
uint16_t& segment,
int& fbo );
uint16_t& dbRoot,
uint32_t& partition,
uint16_t& segment,
int& fbo );
/**
* @brief Get last "local" HWM, partition, and segment for an OID and DBRoot
*/
EXPORT int getLastHWM_DBroot( OID oid,
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segment,
HWM& hwm,
int& status,
bool& bFound);
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segment,
HWM& hwm,
int& status,
bool& bFound);
/**
* @brief Get HWM for a specific OID, partition, and segment
*/
int getLocalHWM( OID oid ,
int getLocalHWM( OID oid,
uint32_t partition,
uint16_t segment,
HWM& hwm,
@ -186,8 +186,8 @@ public:
/**
* @brief Get HWM info for a specific OID and PM
*/
EXPORT int getDbRootHWMInfo( const OID oid ,
BRM::EmDbRootHWMInfo_v& emDbRootHwmInfos);
EXPORT int getDbRootHWMInfo( const OID oid,
BRM::EmDbRootHWMInfo_v& emDbRootHwmInfos);
/**
* @brief Get status or state of the extents in the specified segment file.
@ -204,20 +204,20 @@ public:
*/
unsigned getExtentRows();
/**
* @brief Return the extents info for specified OID
*/
/**
* @brief Return the extents info for specified OID
*/
int getExtents( int oid,
std::vector<struct BRM::EMEntry>& entries,
bool sorted, bool notFoundErr,
bool incOutOfService );
std::vector<struct BRM::EMEntry>& entries,
bool sorted, bool notFoundErr,
bool incOutOfService );
/**
* @brief Return the extents info for specified OID and dbroot
*/
int getExtents_dbroot( int oid,
std::vector<struct BRM::EMEntry>& entries,
const uint16_t dbroot);
std::vector<struct BRM::EMEntry>& entries,
const uint16_t dbroot);
/**
* @brief Return the read/write status of DBRM (helps detect if DBRM is up)
@ -225,13 +225,13 @@ public:
EXPORT int isReadWrite();
/**
* @brief Return the state of the system state shutdown pending
* @brief Return the state of the system state shutdown pending
* flags
*/
EXPORT int isShutdownPending(bool& bRollback, bool& bForce);
/**
* @brief Return the state of the system state suspend pending
* @brief Return the state of the system state suspend pending
* flags
*/
EXPORT int isSuspendPending();
@ -250,14 +250,14 @@ public:
* @brief Mark extent invalid for causal partioning
*/
int markExtentInvalid(const uint64_t lbid,
const execplan::CalpontSystemCatalog::ColDataType colDataType);
const execplan::CalpontSystemCatalog::ColDataType colDataType);
/**
* @brief Mark multiple extents invalid for causal partioning
*/
int markExtentsInvalid(std::vector<BRM::LBID_t>& lbids,
const std::vector<execplan::CalpontSystemCatalog::ColDataType>&
colDataTypes);
const std::vector<execplan::CalpontSystemCatalog::ColDataType>&
colDataTypes);
/**
* @brief set extents CP min/max info into extent map
@ -272,11 +272,11 @@ public:
* extents for the specified oid and dbroot are deleted.
*/
int rollbackColumnExtents_DBroot( const OID oid,
bool bDeleteAll,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::HWM_t hwm );
bool bDeleteAll,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::HWM_t hwm );
/**
* @brief Perform bulk rollback of the extents that follow the specified
@ -288,18 +288,18 @@ public:
* oid and dbroot are deleted.
*/
int rollbackDictStoreExtents_DBroot( OID oid,
uint16_t dbRoot,
uint32_t partition,
const std::vector<uint16_t>& segNums,
const std::vector<BRM::HWM_t>& hwms );
uint16_t dbRoot,
uint32_t partition,
const std::vector<uint16_t>& segNums,
const std::vector<BRM::HWM_t>& hwms );
/**
* @brief Perform delete column extents
* @brief Perform delete column extents
*/
int deleteEmptyColExtents(const std::vector<BRM::ExtentInfo>& extentsInfo);
/**
* @brief Perform delete dictionary extents
* @brief Perform delete dictionary extents
*/
int deleteEmptyDictStoreExtents(
const std::vector<BRM::ExtentInfo>& extentsInfo );
@ -313,8 +313,8 @@ public:
const HWM hwm );
//Set hwm for all columns in a table
int bulkSetHWM( const std::vector<BRM::BulkSetHWMArg> & vec,
BRM::VER_t transID);
int bulkSetHWM( const std::vector<BRM::BulkSetHWMArg>& vec,
BRM::VER_t transID);
/**
* @brief Atomically apply a batch of HWM and CP updates within the scope
@ -323,7 +323,7 @@ public:
* @param mergeCPDataArgs Vector of Casual Partition updates
*/
int bulkSetHWMAndCP( const std::vector<BRM::BulkSetHWMArg>& hwmArgs,
const std::vector<BRM::CPInfoMerge>& mergeCPDataArgs);
const std::vector<BRM::CPInfoMerge>& mergeCPDataArgs);
/**
* @brief Acquire a table lock for the specified table OID.
@ -352,9 +352,9 @@ public:
* @param errMsg Applicable error message.
*/
EXPORT int changeTableLockState ( uint64_t lockID,
BRM::LockState lockState,
bool& bChanged,
std::string& errMsg);
BRM::LockState lockState,
bool& bChanged,
std::string& errMsg);
/**
* @brief Release the specified table lock ID.
@ -400,60 +400,60 @@ public:
* @brief Copy blocks between write engine and version buffer
*/
EXPORT int copyVBBlock( IDBDataFile* pSourceFile,
IDBDataFile* pTargetFile,
const uint64_t sourceFbo,
const uint64_t targetFbo,
DbFileOp* fileOp,
const Column& column );
IDBDataFile* pTargetFile,
const uint64_t sourceFbo,
const uint64_t targetFbo,
DbFileOp* fileOp,
const Column& column );
EXPORT int copyVBBlock( IDBDataFile* pSourceFile,
const OID sourceOid,
IDBDataFile* pTargetFile,
const OID targetOid,
const std::vector<uint32_t>& fboList,
const BRM::VBRange& freeList,
size_t& nBlocksProcessed,
DbFileOp* pFileOp,
const size_t fboCurrentOffset = 0 );
const OID sourceOid,
IDBDataFile* pTargetFile,
const OID targetOid,
const std::vector<uint32_t>& fboList,
const BRM::VBRange& freeList,
size_t& nBlocksProcessed,
DbFileOp* pFileOp,
const size_t fboCurrentOffset = 0 );
/**
* @brief Rollback the specified transaction
*/
EXPORT int rollBack( const BRM::VER_t transID, int sessionId );
/**
* @brief Rollback the specified transaction
*/
/**
* @brief Rollback the specified transaction
*/
EXPORT int rollBackVersion( const BRM::VER_t transID, int sessionId );
/**
* @brief Rollback the specified transaction
*/
/**
* @brief Rollback the specified transaction
*/
EXPORT int rollBackBlocks( const BRM::VER_t transID, int sessionId );
/**
* @brief Write specified LBID to version buffer
*/
EXPORT int writeVB( IDBDataFile* pFile,
const BRM::VER_t transID,
const OID oid,
const uint64_t lbid,
DbFileOp* pFileOp );
const BRM::VER_t transID,
const OID oid,
const uint64_t lbid,
DbFileOp* pFileOp );
int writeVB( IDBDataFile* pFile,
const BRM::VER_t transID,
const OID weOid,
std::vector<uint32_t>& fboList,
std::vector<BRM::LBIDRange>& rangeList,
DbFileOp* pFileOp,
std::vector<BRM::VBRange>& freeList,
uint16_t dbRoot,
bool skipBeginVBCopy = false);
const BRM::VER_t transID,
const OID weOid,
std::vector<uint32_t>& fboList,
std::vector<BRM::LBIDRange>& rangeList,
DbFileOp* pFileOp,
std::vector<BRM::VBRange>& freeList,
uint16_t dbRoot,
bool skipBeginVBCopy = false);
void writeVBEnd(const BRM::VER_t transID,
std::vector<BRM::LBIDRange>& rangeList);
BRM::DBRM* getDbrmObject();
void pruneLBIDList(BRM::VER_t transID,
std::vector<BRM::LBIDRange> *rangeList,
std::vector<uint32_t> *fboList) const;
std::vector<BRM::LBIDRange>& rangeList);
BRM::DBRM* getDbrmObject();
void pruneLBIDList(BRM::VER_t transID,
std::vector<BRM::LBIDRange>* rangeList,
std::vector<uint32_t>* fboList) const;
//--------------------------------------------------------------------------
// Non-inline Versioning Functions End Here
@ -463,9 +463,15 @@ public:
* @brief static functions
*/
EXPORT static BRMWrapper* getInstance();
EXPORT static int getBrmRc(bool reset=true);
static bool getUseVb() { return m_useVb; }
static void setUseVb( const bool val ) { m_useVb = val; }
EXPORT static int getBrmRc(bool reset = true);
static bool getUseVb()
{
return m_useVb;
}
static void setUseVb( const bool val )
{
m_useVb = val;
}
private:
//--------------------------------------------------------------------------
@ -484,8 +490,8 @@ private:
EXPORT void saveBrmRc( int brmRc );
IDBDataFile* openFile( const File& fileInfo,
const char* mode,
const bool bCache = false );
const char* mode,
const bool bCache = false );
@ -521,54 +527,56 @@ inline BRMWrapper::~BRMWrapper()
{
if (blockRsltnMgrPtr)
delete blockRsltnMgrPtr;
blockRsltnMgrPtr = 0;
}
inline BRM::DBRM* BRMWrapper::getDbrmObject()
{
return blockRsltnMgrPtr;
return blockRsltnMgrPtr;
}
inline int BRMWrapper::getRC( int brmRc, int errRc )
{
if (brmRc == BRM::ERR_OK)
return NO_ERROR;
saveBrmRc( brmRc );
return errRc;
}
inline int BRMWrapper::getLastHWM_DBroot( OID oid,
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segment,
HWM& hwm,
int& status,
bool& bFound)
uint16_t dbRoot,
uint32_t& partition,
uint16_t& segment,
HWM& hwm,
int& status,
bool& bFound)
{
int rc = blockRsltnMgrPtr->getLastHWM_DBroot(
(BRM::OID_t)oid, dbRoot, partition, segment, hwm,
status, bFound);
(BRM::OID_t)oid, dbRoot, partition, segment, hwm,
status, bFound);
return getRC( rc, ERR_BRM_GET_HWM );
}
inline int BRMWrapper::getLocalHWM( OID oid ,
uint32_t partition,
uint16_t segment,
HWM& hwm,
int& status)
inline int BRMWrapper::getLocalHWM( OID oid,
uint32_t partition,
uint16_t segment,
HWM& hwm,
int& status)
{
int rc = blockRsltnMgrPtr->getLocalHWM(
(BRM::OID_t)oid, partition, segment, hwm, status);
(BRM::OID_t)oid, partition, segment, hwm, status);
return getRC( rc, ERR_BRM_GET_HWM );
}
inline int BRMWrapper::getExtentState( OID oid,
uint32_t partition,
uint16_t segment,
bool& bFound,
int& status)
uint32_t partition,
uint16_t segment,
bool& bFound,
int& status)
{
int rc = blockRsltnMgrPtr->getExtentState(
(BRM::OID_t)oid, partition, segment, bFound, status);
(BRM::OID_t)oid, partition, segment, bFound, status);
return getRC( rc, ERR_BRM_GET_EXT_STATE );
}
@ -578,21 +586,21 @@ inline unsigned BRMWrapper::getExtentRows()
}
inline int BRMWrapper::getExtents( int oid,
std::vector<struct BRM::EMEntry>& entries,
bool sorted, bool notFoundErr,
bool incOutOfService )
std::vector<struct BRM::EMEntry>& entries,
bool sorted, bool notFoundErr,
bool incOutOfService )
{
int rc = blockRsltnMgrPtr->getExtents(
oid, entries, sorted, notFoundErr, incOutOfService);
oid, entries, sorted, notFoundErr, incOutOfService);
return rc;
}
inline int BRMWrapper::getExtents_dbroot( int oid,
std::vector<struct BRM::EMEntry>& entries,
const uint16_t dbroot )
std::vector<struct BRM::EMEntry>& entries,
const uint16_t dbroot )
{
int rc = blockRsltnMgrPtr->getExtents_dbroot(
oid, entries, dbroot);
oid, entries, dbroot);
return rc;
}
@ -608,20 +616,22 @@ inline int BRMWrapper::lookupLbidRanges( OID oid, BRM::LBIDRange_v& lbidRanges)
}
inline int BRMWrapper::markExtentInvalid( const uint64_t lbid,
const execplan::CalpontSystemCatalog::ColDataType colDataType )
const execplan::CalpontSystemCatalog::ColDataType colDataType )
{
int rc = blockRsltnMgrPtr->markExtentInvalid( lbid, colDataType );
return getRC( rc, ERR_BRM_MARK_INVALID );
}
inline int BRMWrapper::markExtentsInvalid(std::vector<BRM::LBID_t>& lbids,
const std::vector<execplan::CalpontSystemCatalog::ColDataType>&
const std::vector<execplan::CalpontSystemCatalog::ColDataType>&
colDataTypes)
{
int rc = 0;
if (idbdatafile::IDBPolicy::useHdfs())
return rc;
rc = blockRsltnMgrPtr->markExtentsInvalid(lbids, colDataTypes);
int rc = 0;
if (idbdatafile::IDBPolicy::useHdfs())
return rc;
rc = blockRsltnMgrPtr->markExtentsInvalid(lbids, colDataTypes);
return getRC( rc, ERR_BRM_MARK_INVALID );
}
@ -632,7 +642,7 @@ inline int BRMWrapper::bulkSetHWMAndCP(
std::vector<BRM::CPInfo> setCPDataArgs; // not used
BRM::VER_t transID = 0; // n/a
int rc = blockRsltnMgrPtr->bulkSetHWMAndCP(
hwmArgs, setCPDataArgs, mergeCPDataArgs, transID );
hwmArgs, setCPDataArgs, mergeCPDataArgs, transID );
return getRC( rc, ERR_BRM_BULK_UPDATE );
}
@ -644,25 +654,25 @@ inline int BRMWrapper::setExtentsMaxMin(const BRM::CPInfoList_t& cpinfoList)
}
inline int BRMWrapper::rollbackColumnExtents_DBroot( const OID oid,
bool bDeleteAll,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::HWM_t hwm )
bool bDeleteAll,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
BRM::HWM_t hwm )
{
int rc = blockRsltnMgrPtr->rollbackColumnExtents_DBroot (
oid, bDeleteAll, dbRoot, partition, segment, hwm );
oid, bDeleteAll, dbRoot, partition, segment, hwm );
return getRC( rc, ERR_BRM_BULK_RB_COLUMN );
}
inline int BRMWrapper::rollbackDictStoreExtents_DBroot( OID oid,
uint16_t dbRoot,
uint32_t partition,
const std::vector<uint16_t>& segNums,
const std::vector<BRM::HWM_t>& hwms )
uint16_t dbRoot,
uint32_t partition,
const std::vector<uint16_t>& segNums,
const std::vector<BRM::HWM_t>& hwms )
{
int rc = blockRsltnMgrPtr->rollbackDictStoreExtents_DBroot (
oid, dbRoot, partition, segNums, hwms );
oid, dbRoot, partition, segNums, hwms );
return getRC( rc, ERR_BRM_BULK_RB_DCTNRY );
}
@ -681,17 +691,17 @@ inline int BRMWrapper::deleteEmptyDictStoreExtents(
}
inline int BRMWrapper::setLocalHWM( OID oid,
uint32_t partition,
uint16_t segment,
const HWM hwm )
uint32_t partition,
uint16_t segment,
const HWM hwm )
{
int rc = blockRsltnMgrPtr->setLocalHWM(
(int)oid, partition, segment, hwm);
(int)oid, partition, segment, hwm);
return getRC( rc, ERR_BRM_SET_HWM );
}
inline int BRMWrapper::bulkSetHWM( const std::vector<BRM::BulkSetHWMArg> & vec,
BRM::VER_t transID = 0)
inline int BRMWrapper::bulkSetHWM( const std::vector<BRM::BulkSetHWMArg>& vec,
BRM::VER_t transID = 0)
{
int rc = blockRsltnMgrPtr->bulkSetHWM( vec, transID);
return getRC( rc, ERR_BRM_SET_HWM );

View File

@ -43,17 +43,17 @@ BulkRollbackFile::BulkRollbackFile(BulkRollbackMgr* mgr) : fMgr(mgr)
// Initialize empty dictionary header block used when reinitializing
// dictionary store extents.
const uint16_t freeSpace = BYTE_PER_BLOCK -
(HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE + HDR_UNIT_SIZE);
(HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE + HDR_UNIT_SIZE);
const uint64_t nextPtr = NOT_USED_PTR;
const uint16_t offSetZero = BYTE_PER_BLOCK;
const uint16_t endHeader = DCTNRY_END_HEADER;
memcpy(fDctnryHdr, &freeSpace, HDR_UNIT_SIZE);
memcpy(fDctnryHdr+ HDR_UNIT_SIZE, &nextPtr, NEXT_PTR_BYTES);
memcpy(fDctnryHdr+ HDR_UNIT_SIZE + NEXT_PTR_BYTES,
&offSetZero, HDR_UNIT_SIZE);
memcpy(fDctnryHdr+ HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE,
&endHeader, HDR_UNIT_SIZE);
memcpy(fDctnryHdr + HDR_UNIT_SIZE, &nextPtr, NEXT_PTR_BYTES);
memcpy(fDctnryHdr + HDR_UNIT_SIZE + NEXT_PTR_BYTES,
&offSetZero, HDR_UNIT_SIZE);
memcpy(fDctnryHdr + HDR_UNIT_SIZE + NEXT_PTR_BYTES + HDR_UNIT_SIZE,
&endHeader, HDR_UNIT_SIZE);
}
//------------------------------------------------------------------------------
@ -83,7 +83,8 @@ void BulkRollbackFile::buildSegmentFileName(
{
char fileName[FILE_NAME_SIZE];
int rc = fDbFile.getFileName( columnOID, fileName,
dbRoot, partNum, segNum );
dbRoot, partNum, segNum );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -114,23 +115,24 @@ void BulkRollbackFile::buildSegmentFileName(
// segFileName - Name of file to be deleted
//------------------------------------------------------------------------------
void BulkRollbackFile::deleteSegmentFile(
OID columnOID,
OID columnOID,
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t partNum,
uint32_t segNum,
const std::string& segFileName )
{
std::ostringstream msgText;
std::ostringstream msgText;
msgText << "Deleting " << (fileTypeFlag ? "column" : "dictionary store") <<
" file: dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
" file: dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText.str() );
logging::M0075, columnOID, msgText.str() );
// delete the db segment file if it exists
int rc = fDbFile.deleteFile( segFileName.c_str() );
if (rc != NO_ERROR)
{
if (rc != ERR_FILE_NOT_EXIST)
@ -170,13 +172,13 @@ void BulkRollbackFile::truncateSegmentFile(
std::ostringstream msgText;
msgText << "Truncating column file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; totBlks-" << fileSizeBlocks <<
"; fileSize(bytes)-" << fileSizeBytes;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; totBlks-" << fileSizeBlocks <<
"; fileSize(bytes)-" << fileSizeBytes;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText.str() );
logging::M0075, columnOID, msgText.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(columnOID, dbRoot, partNum, segNum, segFile);
@ -195,6 +197,7 @@ void BulkRollbackFile::truncateSegmentFile(
}
int rc = fDbFile.truncateFile( pFile, fileSizeBytes );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -216,7 +219,7 @@ void BulkRollbackFile::truncateSegmentFile(
//------------------------------------------------------------------------------
// Reinitialize a column segment extent (in the db file) to empty values,
// following the HWM. Remaining extents in the file are truncated.
//
//
// columnOID - OID of segment file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -245,21 +248,22 @@ void BulkRollbackFile::reInitTruncColumnExtent(
std::ostringstream msgText;
msgText << "Reinit HWM column extent in db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; freeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; freeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText.str() );
logging::M0075, columnOID, msgText.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(columnOID, dbRoot, partNum, segNum, segFile);
if (pFile == 0)
{
std::ostringstream oss;
oss << "Error opening HWM column segment file to rollback extents "
"from DB for" <<
"from DB for" <<
": OID-" << columnOID <<
"; DbRoot-" << dbRoot <<
"; partition-" << partNum <<
@ -275,6 +279,7 @@ void BulkRollbackFile::reInitTruncColumnExtent(
if ((partNum == 0) && (segNum == 0))
{
long long nBytesInAbbrevExtent = INITIAL_EXTENT_ROWS_TO_DISK * colWidth;
if (startOffset <= nBytesInAbbrevExtent)
{
// This check would prevent us from truncating back to an
@ -285,17 +290,17 @@ void BulkRollbackFile::reInitTruncColumnExtent(
//int rc = fDbFile.getFileSize2(pFile,fileSizeBytes);
//if (fileSizeBytes == nBytesInAbbrevExtent)
{
nBlocks = (nBytesInAbbrevExtent-startOffset) / BYTE_PER_BLOCK;
nBlocks = (nBytesInAbbrevExtent - startOffset) / BYTE_PER_BLOCK;
std::ostringstream msgText2;
msgText2 << "Reinit (abbrev) HWM column extent in db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; freeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; freeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText2.str() );
logging::M0075, columnOID, msgText2.str() );
}
}
}
@ -304,10 +309,11 @@ void BulkRollbackFile::reInitTruncColumnExtent(
uint64_t emptyVal = fDbFile.getEmptyRowValue( colType, colWidth );
int rc = fDbFile.reInitPartialColumnExtent( pFile,
startOffset,
nBlocks,
emptyVal,
colWidth );
startOffset,
nBlocks,
emptyVal,
colWidth );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -348,7 +354,7 @@ void BulkRollbackFile::reInitTruncColumnExtent(
//------------------------------------------------------------------------------
// Reinitialize a dictionary segment extent (in the db file) to empty blocks,
// following the HWM. Remaining extents in the file are truncated.
//
//
// dStoreOID - OID of segment store file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -369,16 +375,17 @@ void BulkRollbackFile::reInitTruncDctnryExtent(
std::ostringstream msgText;
msgText << "Reinit dictionary store extent in db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; numblks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; numblks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText.str() );
logging::M0075, dStoreOID, msgText.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(dStoreOID, dbRoot, partNum, segNum, segFile);
if (pFile == 0)
{
std::ostringstream oss;
@ -399,6 +406,7 @@ void BulkRollbackFile::reInitTruncDctnryExtent(
const uint32_t PSEUDO_COL_WIDTH = 8; // simulated col width for dictionary
long long nBytesInAbbrevExtent = INITIAL_EXTENT_ROWS_TO_DISK *
PSEUDO_COL_WIDTH;
if (startOffset <= nBytesInAbbrevExtent)
{
// This check would prevent us from truncating back to an
@ -409,26 +417,27 @@ void BulkRollbackFile::reInitTruncDctnryExtent(
//int rc = fDbFile.getFileSize2(pFile,fileSizeBytes);
//if (fileSizeBytes == nBytesInAbbrevExtent)
{
nBlocks = (nBytesInAbbrevExtent-startOffset) / BYTE_PER_BLOCK;
nBlocks = (nBytesInAbbrevExtent - startOffset) / BYTE_PER_BLOCK;
std::ostringstream msgText2;
msgText2 << "Reinit (abbrev) dictionary store extent in db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; numblks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; offset(bytes)-" << startOffset <<
"; numblks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText2.str() );
logging::M0075, dStoreOID, msgText2.str() );
}
}
// Initialize the remainder of the extent after the HWM block
int rc = fDbFile.reInitPartialDctnryExtent( pFile,
startOffset,
nBlocks,
fDctnryHdr,
DCTNRY_HEADER_SIZE );
startOffset,
nBlocks,
fDctnryHdr,
DCTNRY_HEADER_SIZE );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -475,9 +484,9 @@ void BulkRollbackFile::reInitTruncDctnryExtent(
// on whether the HWM chunk was modified and backed up to disk.
//------------------------------------------------------------------------------
bool BulkRollbackFile::doWeReInitExtent( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
{
return true;
}
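
reInitTruncColumnExtent() and reInitTruncDctnryExtent() above clear everything in the HWM extent past the rollback point and truncate the rest of the file; for the abbreviated first extent (partition 0, segment 0) the block count is recomputed from the abbreviated extent size. A worked-arithmetic sketch of that recomputation (the constant values here are assumptions for illustration, not quoted from the headers):

#include <iostream>

int main()
{
    const long long BYTE_PER_BLOCK = 8192;                      // assumed 8 KB blocks
    const long long INITIAL_EXTENT_ROWS_TO_DISK = 256 * 1024;   // assumed abbreviated-extent rows
    const int colWidth = 4;                                     // 4-byte column
    const long long startOffset = 262144;                       // example HWM offset in bytes

    long long nBytesInAbbrevExtent = INITIAL_EXTENT_ROWS_TO_DISK * colWidth;
    long long nBlocks = 0;

    // Only re-initialize what lies past the HWM inside the abbreviated extent.
    if ( startOffset <= nBytesInAbbrevExtent )
        nBlocks = ( nBytesInAbbrevExtent - startOffset ) / BYTE_PER_BLOCK;

    std::cout << "blocks to re-initialize: " << nBlocks << std::endl;   // prints 96
    return 0;
}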

View File

@ -38,7 +38,7 @@
namespace WriteEngine
{
class BulkRollbackMgr;
class BulkRollbackMgr;
//------------------------------------------------------------------------------
/** @brief Class used by BulkRollbackMgr to restore db files.
@ -68,11 +68,11 @@ public:
* @param segFileName (out) Name of segment file
*/
void buildSegmentFileName(OID columnOID,
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
std::string& segFileName);
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
std::string& segFileName);
/** @brief Delete a segment file.
* Warning: This function may throw a WeException.
@ -85,11 +85,11 @@ public:
* @param segFileName Name of segment file to be deleted
*/
void deleteSegmentFile(OID columnOID,
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
const std::string& segFileName );
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
const std::string& segFileName );
/** @brief Construct a directory path.
*
@ -117,9 +117,9 @@ public:
* @param segNum Segment number for the segment file in question
*/
virtual bool doWeReInitExtent( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
/** @brief Reinitialize the specified column segment file starting at
* startOffsetBlk, and truncate trailing extents.
@ -137,14 +137,14 @@ public:
* @param restoreHwmChk Restore HWM chunk (n/a to uncompressed)
*/
virtual void reInitTruncColumnExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
/** @brief Reinitialize the specified dictionary store segment file starting
* at startOffsetBlk, and truncate trailing extents.
@ -159,11 +159,11 @@ public:
* @param nBlocks Number of blocks to be reinitialized
*/
virtual void reInitTruncDctnryExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
/** @brief Truncate the specified segment file to a specified num of bytes
* Warning: This function may throw a WeException.
@ -175,10 +175,10 @@ public:
* @param fileSizeBlocks Number of blocks to retain in the file
*/
virtual void truncateSegmentFile( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
protected:
BulkRollbackMgr* fMgr; // Bulk Rollback controller
@ -195,9 +195,9 @@ private:
// Inline functions
//------------------------------------------------------------------------------
inline int BulkRollbackFile::buildDirName( OID oid,
uint16_t dbRoot,
uint32_t partition,
std::string& dirName)
uint16_t dbRoot,
uint32_t partition,
std::string& dirName)
{
return fDbFile.getDirName( oid, dbRoot, partition, dirName );
}

View File

@ -80,15 +80,16 @@ void BulkRollbackFileCompressed::truncateSegmentFile(
{
std::ostringstream msgText1;
msgText1 << "Truncating compressed column file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawTotBlks-" << fileSizeBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawTotBlks-" << fileSizeBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText1.str() );
logging::M0075, columnOID, msgText1.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(columnOID, dbRoot, partNum, segNum, segFile);
if (pFile == 0)
{
std::ostringstream oss;
@ -107,6 +108,7 @@ void BulkRollbackFileCompressed::truncateSegmentFile(
CompChunkPtrList chunkPtrs;
std::string errMsg;
int rc = loadColumnHdrPtrs(pFile, hdrs, chunkPtrs, errMsg);
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -131,29 +133,32 @@ void BulkRollbackFileCompressed::truncateSegmentFile(
if (chunkIndex < chunkPtrs.size())
{
long long fileSizeBytes = chunkPtrs[chunkIndex].first +
chunkPtrs[chunkIndex].second;
chunkPtrs[chunkIndex].second;
std::ostringstream msgText2;
msgText2 << "Compressed column file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; truncated to " << fileSizeBytes << " bytes";
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; truncated to " << fileSizeBytes << " bytes";
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText2.str() );
logging::M0075, columnOID, msgText2.str() );
// Drop off any trailing pointers (that point beyond the last block)
fCompressor.setBlockCount( hdrs, fileSizeBlocks );
std::vector<uint64_t> ptrs;
for (unsigned i=0; i<=chunkIndex; i++)
for (unsigned i = 0; i <= chunkIndex; i++)
{
ptrs.push_back( chunkPtrs[i].first );
}
ptrs.push_back( chunkPtrs[chunkIndex].first +
chunkPtrs[chunkIndex].second );
chunkPtrs[chunkIndex].second );
fCompressor.storePtrs( ptrs, hdrs );
rc = fDbFile.writeHeaders( pFile, hdrs );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -171,6 +176,7 @@ void BulkRollbackFileCompressed::truncateSegmentFile(
// Finally, we truncate the data base column segment file
rc = fDbFile.truncateFile( pFile, fileSizeBytes );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -194,7 +200,7 @@ void BulkRollbackFileCompressed::truncateSegmentFile(
// Reinitialize a column segment extent (in the db file) to empty values,
// following the HWM. Remaining extents in the file are truncated.
// Also updates the header(s) as well.
//
//
// columnOID - OID of segment file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -221,16 +227,17 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
std::ostringstream msgText1;
msgText1 << "Reinit HWM compressed column extent in db file" <<
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-"<< startOffset <<
"; rawFreeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText1.str() );
logging::M0075, columnOID, msgText1.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(columnOID, dbRoot, partNum, segNum, segFile);
if (pFile == 0)
{
std::ostringstream oss;
@ -249,6 +256,7 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
CompChunkPtrList chunkPtrs;
std::string errMsg;
int rc = loadColumnHdrPtrs(pFile, hdrs, chunkPtrs, errMsg);
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -262,7 +270,7 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
fDbFile.closeFile( pFile );
throw WeException( oss.str(), rc );
}
// Locate the chunk containing the last block we intend to keep
unsigned int blockOffset = startOffsetBlk - 1;
unsigned int chunkIndex = 0;
@ -274,11 +282,13 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
// Read backup copy of HWM chunk and restore it's contents
uint64_t restoredChunkLen = 0;
uint64_t restoredFileSize = 0;
if (restoreHwmChk)
{
rc = restoreHWMChunk(pFile, columnOID, partNum, segNum,
chunkPtrs[chunkIndex].first,
restoredChunkLen, restoredFileSize, errMsg);
chunkPtrs[chunkIndex].first,
restoredChunkLen, restoredFileSize, errMsg);
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -305,7 +315,7 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
else
restoredFileSize = (chunkPtrs[chunkIndex].first +
chunkPtrs[chunkIndex].second) +
(uint64_t)(nBlocks * BYTE_PER_BLOCK);
(uint64_t)(nBlocks * BYTE_PER_BLOCK);
}
// nBlocks is based on full extents, but if database file only has an
@ -313,14 +323,16 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
// file with a single abbreviated extent.
// (Only the 1st extent in part0, seg0 employs an abbreviated extent.)
bool bAbbreviatedExtent = false;
// DMC-SHARED_NOTHING_NOTE: Is it safe to assume only part0 seg0 is abbreviated?
if ((partNum == 0) && (segNum == 0))
{
long long nBytesInAbbrevExtent = INITIAL_EXTENT_ROWS_TO_DISK *
colWidth;
if (startOffset <= nBytesInAbbrevExtent)
{
nBlocks = (nBytesInAbbrevExtent-startOffset) / BYTE_PER_BLOCK;
nBlocks = (nBytesInAbbrevExtent - startOffset) / BYTE_PER_BLOCK;
bAbbreviatedExtent = true;
}
}
@ -329,17 +341,21 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
std::ostringstream msgText2;
msgText2 << "HWM compressed column file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
if (bAbbreviatedExtent) // log adjusted nBlock count for abbrev extent
msgText2 << "; rawFreeBlks-" << nBlocks << " (abbrev)";
msgText2 << "; restoredChunk-" << restoredChunkLen << " bytes";
if (!restoreHwmChk)
msgText2 << " (no change)";
msgText2 << "; truncated to " << fileSizeBytes << " bytes";
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText2.str() );
logging::M0075, columnOID, msgText2.str() );
// Initialize the remainder of the extent after the HWM chunk.
// Just doing an ftruncate() reinits the file to 0's, which may or may
@ -350,18 +366,21 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
const unsigned BLKS_PER_EXTENT =
(BRMWrapper::getInstance()->getExtentRows() * colWidth) /
BYTE_PER_BLOCK;
long long nBlocksToInit = (fileSizeBytes -
(chunkPtrs[chunkIndex].first + restoredChunkLen)) / BYTE_PER_BLOCK;
long long nBlocksToInit = (fileSizeBytes -
(chunkPtrs[chunkIndex].first + restoredChunkLen)) / BYTE_PER_BLOCK;
if (nBlocksToInit > BLKS_PER_EXTENT)
nBlocksToInit = BLKS_PER_EXTENT; // don't init > 1 full extent
if (nBlocksToInit > 0)
{
uint64_t emptyVal = fDbFile.getEmptyRowValue( colType, colWidth );
rc = fDbFile.reInitPartialColumnExtent( pFile,
(chunkPtrs[chunkIndex].first + restoredChunkLen),
nBlocksToInit,
emptyVal,
colWidth );
(chunkPtrs[chunkIndex].first + restoredChunkLen),
nBlocksToInit,
emptyVal,
colWidth );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -384,17 +403,20 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
// just restore the first pointer (set to 8192).
fCompressor.setBlockCount( hdrs, (startOffsetBlk + nBlocks) );
std::vector<uint64_t> newPtrs;
if ((chunkIndex > 0) || (restoredChunkLen > 0))
{
for (unsigned int i=0; i<=chunkIndex; i++)
for (unsigned int i = 0; i <= chunkIndex; i++)
{
newPtrs.push_back( chunkPtrs[i].first );
}
}
newPtrs.push_back( chunkPtrs[chunkIndex].first + restoredChunkLen );
fCompressor.storePtrs( newPtrs, hdrs );
rc = fDbFile.writeHeaders( pFile, hdrs );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -412,6 +434,7 @@ void BulkRollbackFileCompressed::reInitTruncColumnExtent(
// Finally, we truncate the data base column segment file
rc = fDbFile.truncateFile( pFile, fileSizeBytes );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -447,6 +470,7 @@ int BulkRollbackFileCompressed::loadColumnHdrPtrs(
{
// Read the header pointers
int rc = fDbFile.readHeaders( pFile, hdrs );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -459,6 +483,7 @@ int BulkRollbackFileCompressed::loadColumnHdrPtrs(
// Parse the header pointers
int rc1 = fCompressor.getPtrList( hdrs, chunkPtrs );
if (rc1 != 0)
{
rc = ERR_METADATABKUP_COMP_PARSE_HDRS;
@ -478,7 +503,7 @@ int BulkRollbackFileCompressed::loadColumnHdrPtrs(
// Reinitialize a dictionary segment extent (in the db file) to empty blocks,
// following the HWM. Remaining extents in the file are truncated.
// Also updates the header(s) as well.
//
//
// dStoreOID - OID of segment store file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -499,16 +524,17 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
std::ostringstream msgText1;
msgText1 << "Reinit HWM compressed dictionary store extent in db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText1.str() );
logging::M0075, dStoreOID, msgText1.str() );
std::string segFile;
IDBDataFile* pFile = fDbFile.openFile(dStoreOID, dbRoot, partNum, segNum, segFile);
if (pFile == 0)
{
std::ostringstream oss;
@ -526,7 +552,8 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
CompChunkPtrList chunkPtrs;
uint64_t ptrHdrSize;
std::string errMsg;
int rc = loadDctnryHdrPtrs(pFile, controlHdr, chunkPtrs, ptrHdrSize,errMsg);
int rc = loadDctnryHdrPtrs(pFile, controlHdr, chunkPtrs, ptrHdrSize, errMsg);
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -540,7 +567,7 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
fDbFile.closeFile( pFile );
throw WeException( oss.str(), rc );
}
// Locate the chunk containing the last block we intend to keep
unsigned int blockOffset = startOffsetBlk - 1;
unsigned int chunkIndex = 0;
@ -553,17 +580,18 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
uint64_t restoredChunkLen = 0;
uint64_t restoredFileSize = 0;
rc = restoreHWMChunk(pFile, dStoreOID, partNum, segNum,
chunkPtrs[chunkIndex].first,
restoredChunkLen, restoredFileSize, errMsg);
chunkPtrs[chunkIndex].first,
restoredChunkLen, restoredFileSize, errMsg);
if (rc == ERR_FILE_NOT_EXIST)
{
std::ostringstream msgText3;
msgText3 << "No restore needed to Compressed dictionary file" <<
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText3.str() );
logging::M0075, dStoreOID, msgText3.str() );
fDbFile.closeFile( pFile );
return;
@ -591,10 +619,11 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
bool bAbbreviatedExtent = false;
const uint32_t PSEUDO_COL_WIDTH = 8; // simulated col width for dctnry
long long nBytesInAbbrevExtent = INITIAL_EXTENT_ROWS_TO_DISK *
PSEUDO_COL_WIDTH;
PSEUDO_COL_WIDTH;
if (startOffset <= nBytesInAbbrevExtent)
{
nBlocks = (nBytesInAbbrevExtent-startOffset) / BYTE_PER_BLOCK;
nBlocks = (nBytesInAbbrevExtent - startOffset) / BYTE_PER_BLOCK;
bAbbreviatedExtent = true;
}
@ -602,15 +631,17 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
std::ostringstream msgText2;
msgText2 << "HWM compressed dictionary file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum;
if (bAbbreviatedExtent) // log adjusted nBlock count for abbrev extent
msgText2 << "; rawFreeBlks-" << nBlocks << " (abbrev)";
msgText2 << "; restoredChunk-" << restoredChunkLen << " bytes" <<
"; truncated to " << fileSizeBytes << " bytes";
"; truncated to " << fileSizeBytes << " bytes";
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText2.str() );
logging::M0075, dStoreOID, msgText2.str() );
// Initialize the remainder of the extent after the HWM chunk
// Just doing an ftruncate() reinits the file to 0's, which may or may
@ -621,17 +652,20 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
const unsigned BLKS_PER_EXTENT =
(BRMWrapper::getInstance()->getExtentRows() * PSEUDO_COL_WIDTH) /
BYTE_PER_BLOCK;
long long nBlocksToInit = (fileSizeBytes -
(chunkPtrs[chunkIndex].first + restoredChunkLen)) / BYTE_PER_BLOCK;
long long nBlocksToInit = (fileSizeBytes -
(chunkPtrs[chunkIndex].first + restoredChunkLen)) / BYTE_PER_BLOCK;
if (nBlocksToInit > BLKS_PER_EXTENT)
nBlocksToInit = BLKS_PER_EXTENT; // don't init > 1 full extent
if (nBlocksToInit > 0)
{
rc = fDbFile.reInitPartialDctnryExtent( pFile,
(chunkPtrs[chunkIndex].first + restoredChunkLen),
nBlocksToInit,
fDctnryHdr,
DCTNRY_HEADER_SIZE );
(chunkPtrs[chunkIndex].first + restoredChunkLen),
nBlocksToInit,
fDctnryHdr,
DCTNRY_HEADER_SIZE );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -654,19 +688,22 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
// just restore the first pointer (set to 8192).
fCompressor.setBlockCount( controlHdr, (startOffsetBlk + nBlocks) );
std::vector<uint64_t> newPtrs;
if ((chunkIndex > 0) || (restoredChunkLen > 0))
{
for (unsigned int i=0; i<=chunkIndex; i++)
for (unsigned int i = 0; i <= chunkIndex; i++)
{
newPtrs.push_back( chunkPtrs[i].first );
}
}
newPtrs.push_back( chunkPtrs[chunkIndex].first + restoredChunkLen );
char* pointerHdr = new char[ptrHdrSize];
fCompressor.storePtrs( newPtrs, pointerHdr, ptrHdrSize );
rc = fDbFile.writeHeaders( pFile, controlHdr, pointerHdr, ptrHdrSize );
delete[] pointerHdr;
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -684,6 +721,7 @@ void BulkRollbackFileCompressed::reInitTruncDctnryExtent(
// Finally, we truncate the data base dictionary store segment file
rc = fDbFile.truncateFile( pFile, fileSizeBytes );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -721,7 +759,8 @@ int BulkRollbackFileCompressed::loadDctnryHdrPtrs(
std::string& errMsg) const
{
int rc = fDbFile.readFile(
pFile, (unsigned char*)controlHdr, IDBCompressInterface::HDR_BUF_LEN);
pFile, (unsigned char*)controlHdr, IDBCompressInterface::HDR_BUF_LEN);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -733,6 +772,7 @@ int BulkRollbackFileCompressed::loadDctnryHdrPtrs(
}
int rc1 = fCompressor.verifyHdr( controlHdr );
if (rc1 != 0)
{
rc = ERR_METADATABKUP_COMP_VERIFY_HDRS;
@ -742,15 +782,16 @@ int BulkRollbackFileCompressed::loadDctnryHdrPtrs(
oss << "Control header verify error (" << rc1 << "): " <<
ec.errorString(rc);
errMsg = oss.str();
return rc;
}
uint64_t hdrSize = fCompressor.getHdrSize(controlHdr);
ptrHdrSize = hdrSize - IDBCompressInterface::HDR_BUF_LEN;
char* pointerHdr = new char[ptrHdrSize];
rc = fDbFile.readFile(pFile, (unsigned char*)pointerHdr, ptrHdrSize);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -765,6 +806,7 @@ int BulkRollbackFileCompressed::loadDctnryHdrPtrs(
// Parse the header pointers
rc1 = fCompressor.getPtrList( pointerHdr, ptrHdrSize, chunkPtrs );
delete[] pointerHdr;
if (rc1 != 0)
{
rc = ERR_METADATABKUP_COMP_PARSE_HDRS;
@ -824,11 +866,12 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
}
IDBDataFile* backupFile = IDBDataFile::open(
IDBPolicy::getType( bulkRollbackSubPath.c_str(), IDBPolicy::WRITEENG ),
bulkRollbackSubPath.c_str(),
"rb",
0,
pFile->colWidth() );
IDBPolicy::getType( bulkRollbackSubPath.c_str(), IDBPolicy::WRITEENG ),
bulkRollbackSubPath.c_str(),
"rb",
0,
pFile->colWidth() );
if (!backupFile)
{
int errrc = errno;
@ -836,7 +879,7 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
std::string eMsg;
Convertor::mapErrnoToString(errrc, eMsg);
std::ostringstream oss;
oss << "Error opening backup file " <<
oss << "Error opening backup file " <<
bulkRollbackSubPath << "; " << eMsg;
errMsg = oss.str();
@ -846,8 +889,9 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
// Read the chunk length and file size
uint64_t sizeHdr[2];
size_t bytesRead = readFillBuffer(backupFile, (char*)sizeHdr,
sizeof(uint64_t)*2);
if (bytesRead != sizeof(uint64_t)*2)
sizeof(uint64_t) * 2);
if (bytesRead != sizeof(uint64_t) * 2)
{
int errrc = errno;
@ -861,12 +905,14 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
delete backupFile;
return ERR_METADATABKUP_COMP_READ_BULK_BKUP;
}
restoredChunkLen = sizeHdr[0];
restoredFileSize = sizeHdr[1];
// Position the destination offset in the DB file
int rc = fDbFile.setFileOffset(pFile, fileOffsetByteForRestoredChunk,
SEEK_SET);
SEEK_SET);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -888,6 +934,7 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
unsigned char* chunk = new unsigned char[restoredChunkLen];
boost::scoped_array<unsigned char> scopedChunk( chunk );
bytesRead = readFillBuffer(backupFile, (char*)chunk, restoredChunkLen);
if (bytesRead != restoredChunkLen)
{
int errrc = errno;
@ -907,6 +954,7 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
// Write/restore the HWM chunk to the applicable database file
rc = fDbFile.writeFile(pFile, chunk, restoredChunkLen);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -934,9 +982,9 @@ int BulkRollbackFileCompressed::restoreHWMChunk(
// and thus not needed.
//------------------------------------------------------------------------------
bool BulkRollbackFileCompressed::doWeReInitExtent( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
{
std::ostringstream oss;
oss << "/" << columnOID << ".p" << partNum << ".s" << segNum;
@ -970,6 +1018,7 @@ size_t BulkRollbackFileCompressed::readFillBuffer(
while (1)
{
nBytes = pFile->read(pBuf, bytesToRead);
if (nBytes > 0)
totalBytesRead += nBytes;
else
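
The backup file consumed by restoreHWMChunk() above has a simple two-part layout: a pair of uint64_t values (the saved chunk length and the size of the db file at backup time) followed by the raw HWM chunk bytes. The standalone reader below is a minimal sketch of that format for illustration only; the engine itself goes through IDBDataFile and readFillBuffer() rather than stdio, and the struct and function names here are hypothetical.

#include <cstdint>
#include <cstdio>
#include <vector>

// Layout written for each backed-up HWM chunk: sizeHdr[0] = chunk length,
// sizeHdr[1] = original file size, then the chunk bytes themselves.
struct HwmChunkBackup
{
    uint64_t chunkLen;
    uint64_t fileSize;
    std::vector<unsigned char> chunk;
};

// Hypothetical reader mirroring the read sequence in restoreHWMChunk().
bool readHwmChunkBackup(const char* path, HwmChunkBackup& out)
{
    std::FILE* f = std::fopen(path, "rb");
    if (!f)
        return false;

    uint64_t sizeHdr[2];
    bool ok = (std::fread(sizeHdr, sizeof(uint64_t), 2, f) == 2);

    if (ok)
    {
        out.chunkLen = sizeHdr[0];
        out.fileSize = sizeHdr[1];
        out.chunk.resize(out.chunkLen);
        ok = (std::fread(out.chunk.data(), 1, out.chunkLen, f) == out.chunkLen);
    }

    std::fclose(f);
    return ok;
}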

@ -37,7 +37,7 @@
namespace WriteEngine
{
class BulkRollbackMgr;
class BulkRollbackMgr;
//------------------------------------------------------------------------------
/** @brief Class used by BulkRollbackMgr to restore compressed db files.
@ -65,9 +65,9 @@ public:
* @param segNum Segment number for the segment file in question
*/
virtual bool doWeReInitExtent( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
/** @brief Reinitialize the specified column segment file starting at
* startOffsetBlk, and truncate trailing extents.
@ -83,14 +83,14 @@ public:
* @param restoreHwmChk Restore HWM chunk
*/
virtual void reInitTruncColumnExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
/** @brief Reinitialize the specified dictionary store segment file starting
* at startOffsetBlk, and truncate trailing extents.
@ -103,11 +103,11 @@ public:
* @param nBlocks Number of blocks to be reinitialized
*/
virtual void reInitTruncDctnryExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
/** @brief Truncate the specified segment file to a specified num of bytes
* @param columnOID OID of the relevant segment file
@ -117,10 +117,10 @@ public:
* @param fileSizeBlocks Number of blocks to retain in the file
*/
virtual void truncateSegmentFile( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
private:
// Disable unnecessary copy constructor and assignment operator

@ -79,12 +79,12 @@ void BulkRollbackFileCompressedHdfs::truncateSegmentFile(
{
std::ostringstream msgText;
msgText << "Truncating compressed HDFS column file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawTotBlks-" << fileSizeBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawTotBlks-" << fileSizeBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText.str() );
logging::M0075, columnOID, msgText.str() );
restoreFromBackup( "column", columnOID, dbRoot, partNum, segNum );
}
@ -93,7 +93,7 @@ void BulkRollbackFileCompressedHdfs::truncateSegmentFile(
// Reinitialize a column segment extent (in the db file) to empty values,
// following the HWM. Remaining extents in the file are truncated.
// Also updates the header(s) as well.
//
//
// columnOID - OID of segment file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -120,13 +120,13 @@ void BulkRollbackFileCompressedHdfs::reInitTruncColumnExtent(
std::ostringstream msgText;
msgText << "Reinit HWM compressed column extent in HDFS db file" <<
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-"<< startOffset <<
"; rawFreeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, columnOID, msgText.str() );
logging::M0075, columnOID, msgText.str() );
restoreFromBackup( "column", columnOID, dbRoot, partNum, segNum );
}
@ -135,7 +135,7 @@ void BulkRollbackFileCompressedHdfs::reInitTruncColumnExtent(
// Reinitialize a dictionary segment extent (in the db file) to empty blocks,
// following the HWM. Remaining extents in the file are truncated.
// Also updates the header(s) as well.
//
//
// dStoreOID - OID of segment store file to be reinitialized
// dbRoot - DBRoot of segment file to be reinitialized
// partNum - Partition number of segment file to be reinitialized
@ -156,13 +156,13 @@ void BulkRollbackFileCompressedHdfs::reInitTruncDctnryExtent(
std::ostringstream msgText;
msgText << "Reinit HWM compressed dictionary store extent in HDFS db file"
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
": dbRoot-" << dbRoot <<
"; part#-" << partNum <<
"; seg#-" << segNum <<
"; rawOffset(bytes)-" << startOffset <<
"; rawFreeBlks-" << nBlocks;
fMgr->logAMessage( logging::LOG_TYPE_INFO,
logging::M0075, dStoreOID, msgText.str() );
logging::M0075, dStoreOID, msgText.str() );
restoreFromBackup( "dictionary store", dStoreOID, dbRoot, partNum, segNum );
}
@ -173,9 +173,9 @@ void BulkRollbackFileCompressedHdfs::reInitTruncDctnryExtent(
// existing backup file.
//------------------------------------------------------------------------------
bool BulkRollbackFileCompressedHdfs::doWeReInitExtent( OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const
{
return true;
}
@ -194,15 +194,16 @@ bool BulkRollbackFileCompressedHdfs::doWeReInitExtent( OID columnOID,
// old leftover backup file.
//------------------------------------------------------------------------------
void BulkRollbackFileCompressedHdfs::restoreFromBackup(const char* colType,
OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum)
OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum)
{
// Construct file name for db file to be restored
char dbFileName[FILE_NAME_SIZE];
int rc = fDbFile.getFileName( columnOID, dbFileName,
dbRoot, partNum, segNum );
dbRoot, partNum, segNum );
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -232,6 +233,7 @@ void BulkRollbackFileCompressedHdfs::restoreFromBackup(const char* colType,
{
// Rename current db file to make room for restored file
rc = IDBPolicy::rename( dbFileName, dbFileNameTmp.c_str() );
if (rc != 0)
{
std::ostringstream oss;
@ -245,6 +247,7 @@ void BulkRollbackFileCompressedHdfs::restoreFromBackup(const char* colType,
// Rename backup file to replace current db file
rc = IDBPolicy::rename( backupFileName.c_str(), dbFileName );
if (rc != 0)
{
std::ostringstream oss;
@ -264,9 +267,10 @@ void BulkRollbackFileCompressedHdfs::restoreFromBackup(const char* colType,
std::string errMsg;
ConfirmHdfsDbFile confirmHdfs;
rc = confirmHdfs.endDbFileChange( std::string("tmp"),
dbFileName,
false,
errMsg);
dbFileName,
false,
errMsg);
if (rc != 0)
{
std::ostringstream oss;
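
The HDFS restore path above is a rename-driven swap: the live db file is moved aside, the backup is promoted into its place, and ConfirmHdfsDbFile::endDbFileChange() then finalizes the change. The sketch below illustrates that sequence only; std::rename()/std::remove() and the file-name suffixes stand in for IDBPolicy::rename() and the engine's real naming, which is not fully shown in this excerpt.

#include <cstdio>
#include <string>

// Illustrative three-step restore, assuming hypothetical ".tmp" and ".bak"
// suffixes for the renamed copy and the backup file.
bool restoreDbFileFromBackup(const std::string& dbFileName)
{
    const std::string tmpName    = dbFileName + ".tmp";
    const std::string backupName = dbFileName + ".bak";

    // 1. Move the current db file out of the way.
    if (std::rename(dbFileName.c_str(), tmpName.c_str()) != 0)
        return false;

    // 2. Promote the backup into the db file's place.
    if (std::rename(backupName.c_str(), dbFileName.c_str()) != 0)
        return false;

    // 3. The engine finalizes via ConfirmHdfsDbFile::endDbFileChange();
    //    here we simply drop the superseded copy.
    std::remove(tmpName.c_str());
    return true;
}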

@ -31,7 +31,7 @@
namespace WriteEngine
{
class BulkRollbackMgr;
class BulkRollbackMgr;
//------------------------------------------------------------------------------
/** @brief Class used by BulkRollbackMgr to restore compressed hdfs db files.
@ -64,9 +64,9 @@ public:
* @param segNum Segment number for the segment file in question
*/
virtual bool doWeReInitExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum) const;
/** @brief Reinitialize the specified column segment file starting at
* startOffsetBlk, and truncate trailing extents.
@ -82,14 +82,14 @@ public:
* @param restoreHwmChk Restore HWM chunk
*/
virtual void reInitTruncColumnExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks,
execplan::CalpontSystemCatalog::ColDataType colType,
uint32_t colWidth,
bool restoreHwmChk );
/** @brief Reinitialize the specified dictionary store segment file starting
* at startOffsetBlk, and truncate trailing extents.
@ -102,11 +102,11 @@ public:
* @param nBlocks Number of blocks to be reinitialized
*/
virtual void reInitTruncDctnryExtent(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long startOffsetBlk,
int nBlocks );
/** @brief Truncate the specified segment file to a specified num of bytes
* @param columnOID OID of the relevant segment file
@ -116,10 +116,10 @@ public:
* @param fileSizeBlocks Number of blocks to retain in the file
*/
virtual void truncateSegmentFile(OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
long long filesSizeBlocks );
private:
// Disable unnecessary copy constructor and assignment operator
@ -128,10 +128,10 @@ private:
const BulkRollbackFileCompressedHdfs& rhs);
void restoreFromBackup(const char* colType,
OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum );
OID columnOID,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum );
};
} //end of namespace

File diff suppressed because it is too large.
@ -50,8 +50,8 @@
namespace WriteEngine
{
class Log;
class BulkRollbackFile;
class Log;
class BulkRollbackFile;
//------------------------------------------------------------------------------
/** @brief Class to clear a database table lock, and rolls back extents
@ -82,12 +82,15 @@ public:
uint64_t lockID,
const std::string& tableName,
const std::string& applName,
Log* logger=0);
Log* logger = 0);
/**
* @brief BulkRollbackMgr destructor
*/
EXPORT ~BulkRollbackMgr( ) { closeMetaDataFile ( ); }
EXPORT ~BulkRollbackMgr( )
{
closeMetaDataFile ( );
}
/**
* @brief Clear table lock and rollback extents for fTableOID
@ -100,17 +103,26 @@ public:
* @brief Accessor to any error msg related to a bad return code.
* @return error message if rollback rejected or failed.
*/
const std::string& getErrorMsg( ) const { return fErrorMsg; }
const std::string& getErrorMsg( ) const
{
return fErrorMsg;
}
/**
* @brief Accessor to the name of the meta file we are processing
*/
const std::string& getMetaFileName() const { return fMetaFileName; }
const std::string& getMetaFileName() const
{
return fMetaFileName;
}
/**
* @brief Mutator to enable/disable debug logging to console.
*/
const void setDebugConsole ( bool debug ) { fDebugConsole = debug; }
const void setDebugConsole ( bool debug )
{
fDebugConsole = debug;
}
/**
* @brief Log the specified message.
@ -138,9 +150,9 @@ public:
* @param errMsg Error msg if return code is not NO_ERROR
*/
EXPORT static int getSegFileList( const std::string& dirName,
bool bIncludeAlternateSegFileNames,
std::vector<uint32_t>& segList,
std::string& errMsg );
bool bIncludeAlternateSegFileNames,
std::vector<uint32_t>& segList,
std::string& errMsg );
private:
// Declare but don't define copy constructor and assignment operator
@ -159,11 +171,11 @@ private:
};
void createFileDeletionEntry( OID columnOID,
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
const std::string& segFileName );
bool fileTypeFlag,
uint32_t dbRoot,
uint32_t partNum,
uint32_t segNum,
const std::string& segFileName );
void deleteColumn1Extents ( const char* inBuf ); // delete col extents
void deleteColumn1ExtentsV3(const char* inBuf );
void deleteColumn1ExtentsV4(const char* inBuf );
@ -175,11 +187,11 @@ private:
void deleteDctnryExtentsV3( );
void deleteDctnryExtentsV4( );
void deleteExtents ( std::istringstream& metaDataStream );
// function that drives extent deletion
// function that drives extent deletion
void readMetaDataRecDctnry(const char* inBuf );//read meta-data dct rec
void deleteSubDir ( const std::string& metaFileName ); // delete
// subdirectory used for backup chunks
// subdirectory used for backup chunks
EXPORT void closeMetaDataFile ( ); // close a metafile
void deleteMetaDataFiles ( ); // delete metafiles
int metaDataFileExists ( bool& exists ); // does meta-data file exists

@ -28,389 +28,441 @@ using namespace std;
namespace WriteEngine
{
CacheControl* Cache::m_cacheParam = NULL;
FreeBufList* Cache::m_freeList = NULL;
CacheMap* Cache::m_lruList = NULL;
CacheMap* Cache::m_writeList = NULL;
CacheControl* Cache::m_cacheParam = NULL;
FreeBufList* Cache::m_freeList = NULL;
CacheMap* Cache::m_lruList = NULL;
CacheMap* Cache::m_writeList = NULL;
#ifdef _MSC_VER
__declspec(dllexport)
__declspec(dllexport)
#endif
bool Cache::m_useCache = false;
/***********************************************************
* DESCRIPTION:
* Clear all list and free memory
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::clear()
{
CacheMapIt it;
BlockBuffer* block;
size_t i;
bool Cache::m_useCache = false;
/***********************************************************
* DESCRIPTION:
* Clear all list and free memory
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::clear()
{
CacheMapIt it;
BlockBuffer* block;
size_t i;
// free list
if( m_freeList != NULL ) {
for( i = 0; i < m_freeList->size(); i++ ) {
// free list
if ( m_freeList != NULL )
{
for ( i = 0; i < m_freeList->size(); i++ )
{
block = m_freeList->at(i);
block->clear();
}
}
}
}
// LRU list
if( m_lruList != NULL ) {
for( it = m_lruList->begin(); it != m_lruList->end(); it++ ) {
// LRU list
if ( m_lruList != NULL )
{
for ( it = m_lruList->begin(); it != m_lruList->end(); it++ )
{
block = it->second;
block->clear();
m_freeList->push_back( block );
}
m_lruList->clear();
}
}
// Write list
if( m_writeList != NULL ) {
for( it = m_writeList->begin(); it != m_writeList->end(); it++ ) {
m_lruList->clear();
}
// Write list
if ( m_writeList != NULL )
{
for ( it = m_writeList->begin(); it != m_writeList->end(); it++ )
{
block = it->second;
block->clear();
m_freeList->push_back( block );
}
m_writeList->clear();
}
}
}
/***********************************************************
* DESCRIPTION:
* Flush write cache
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::flushCache()
{
bool bHasReadBlock = false;
BlockBuffer* curBuf;
m_writeList->clear();
}
}
// add lock here
if( m_lruList && m_lruList->size() > 0 ) {
bHasReadBlock = true;
for( CacheMapIt it = m_lruList->begin(); it != m_lruList->end(); it++ ) {
/***********************************************************
* DESCRIPTION:
* Flush write cache
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::flushCache()
{
bool bHasReadBlock = false;
BlockBuffer* curBuf;
// add lock here
if ( m_lruList && m_lruList->size() > 0 )
{
bHasReadBlock = true;
for ( CacheMapIt it = m_lruList->begin(); it != m_lruList->end(); it++ )
{
curBuf = it->second;
curBuf->clear();
m_freeList->push_back( curBuf );
}
m_lruList->clear();
}
}
// must write to disk first
if( m_writeList && m_writeList->size() > 0 ) {
if( !bHasReadBlock )
for( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ ) {
curBuf = it->second;
curBuf->clear();
m_freeList->push_back( curBuf );
m_lruList->clear();
}
// must write to disk first
if ( m_writeList && m_writeList->size() > 0 )
{
if ( !bHasReadBlock )
for ( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ )
{
curBuf = it->second;
curBuf->clear();
m_freeList->push_back( curBuf );
}
else
for( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ ) {
curBuf = it->second;
(*curBuf).block.dirty = false;
processCacheMap( m_lruList, curBuf, INSERT );
else
for ( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ )
{
curBuf = it->second;
(*curBuf).block.dirty = false;
processCacheMap( m_lruList, curBuf, INSERT );
}
m_writeList->clear();
} // end of if( m_writeList->size()
// add unlock here
return NO_ERROR;
}
m_writeList->clear();
/***********************************************************
* DESCRIPTION:
* Free memory
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::freeMemory()
{
CacheMapIt it;
BlockBuffer* block;
size_t i;
} // end of if( m_writeList->size()
// free list
if( m_freeList != NULL ) {
for( i = 0; i < m_freeList->size(); i++ ) {
// add unlock here
return NO_ERROR;
}
/***********************************************************
* DESCRIPTION:
* Free memory
* PARAMETERS:
* none
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::freeMemory()
{
CacheMapIt it;
BlockBuffer* block;
size_t i;
// free list
if ( m_freeList != NULL )
{
for ( i = 0; i < m_freeList->size(); i++ )
{
block = m_freeList->at(i);
block->freeMem();
delete block;
}
m_freeList->clear();
delete m_freeList;
m_freeList = NULL;
}
}
// LRU list
if( m_lruList != NULL ) {
for( it = m_lruList->begin(); it != m_lruList->end(); it++ ) {
m_freeList->clear();
delete m_freeList;
m_freeList = NULL;
}
// LRU list
if ( m_lruList != NULL )
{
for ( it = m_lruList->begin(); it != m_lruList->end(); it++ )
{
block = it->second;
block->freeMem();
delete block;
}
m_lruList->clear();
delete m_lruList;
m_lruList = NULL;
}
}
// Write list
if( m_writeList != NULL ) {
for( it = m_writeList->begin(); it != m_writeList->end(); it++ ) {
m_lruList->clear();
delete m_lruList;
m_lruList = NULL;
}
// Write list
if ( m_writeList != NULL )
{
for ( it = m_writeList->begin(); it != m_writeList->end(); it++ )
{
block = it->second;
block->freeMem();
delete block;
}
m_writeList->clear();
delete m_writeList;
m_writeList = NULL;
}
}
// param
if( m_cacheParam != NULL ) {
delete m_cacheParam;
m_cacheParam = NULL;
}
}
m_writeList->clear();
delete m_writeList;
m_writeList = NULL;
}
/***********************************************************
* DESCRIPTION:
* get a list size
* PARAMETERS:
* listType - List type
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::getListSize( const CacheListType listType )
{
int size = 0;
// param
if ( m_cacheParam != NULL )
{
delete m_cacheParam;
m_cacheParam = NULL;
}
}
if( !m_useCache )
return size;
/***********************************************************
* DESCRIPTION:
* get a list size
* PARAMETERS:
* listType - List type
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::getListSize( const CacheListType listType )
{
int size = 0;
switch( listType ) {
case FREE_LIST: size = m_freeList->size(); break;
case LRU_LIST: size = m_lruList->size(); break;
case WRITE_LIST:
default:
size = m_writeList->size(); break;
}
return size;
}
if ( !m_useCache )
return size;
/***********************************************************
* DESCRIPTION:
* Init all parameters and list
* PARAMETERS:
* totalBlock - total blocks
* chkPoint - checkpoint interval
* pctFree - percentage free
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::init( const int totalBlock, const int chkPoint, const int pctFree )
{
BlockBuffer* buffer;
switch ( listType )
{
case FREE_LIST:
size = m_freeList->size();
break;
if( m_cacheParam && m_freeList && m_lruList && m_writeList )
return;
case LRU_LIST:
size = m_lruList->size();
break;
m_cacheParam = new CacheControl();
m_cacheParam->totalBlock = totalBlock;
m_cacheParam->checkInterval = chkPoint;
m_cacheParam->pctFree = pctFree;
case WRITE_LIST:
default:
size = m_writeList->size();
break;
}
m_freeList = new FreeBufList();
m_lruList = new CacheMap();
m_writeList = new CacheMap();
return size;
}
for( int i = 0; i < m_cacheParam->totalBlock; i++ ) {
buffer = new BlockBuffer();
buffer->init();
m_freeList->push_back( buffer );
}
}
/***********************************************************
* DESCRIPTION:
* Init all parameters and list
* PARAMETERS:
* totalBlock - total blocks
* chkPoint - checkpoint interval
* pctFree - percentage free
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
void Cache::init( const int totalBlock, const int chkPoint, const int pctFree )
{
BlockBuffer* buffer;
/***********************************************************
* DESCRIPTION:
* Insert a buffer to LRU list
* PARAMETERS:
* cb - Comm Block
* lbid - lbid value
* fbo - fbo
* buf - input buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const unsigned char* buf )
{
BlockBuffer* buffer;
vector<BlockBuffer*>::iterator it;
if ( m_cacheParam && m_freeList && m_lruList && m_writeList )
return;
if( m_freeList->size() == 0 )
return ERR_FREE_LIST_EMPTY;
m_cacheParam = new CacheControl();
m_cacheParam->totalBlock = totalBlock;
m_cacheParam->checkInterval = chkPoint;
m_cacheParam->pctFree = pctFree;
// make sure flush first if necessary
it = m_freeList->begin();
buffer = *it;
memcpy( (*buffer).block.data, buf, BYTE_PER_BLOCK );
(*buffer).listType = LRU_LIST;
(*buffer).block.lbid = lbid;
(*buffer).block.fbo = fbo;
(*buffer).block.dirty = false;
(*buffer).block.hitCount = 1;
(*buffer).cb.file.oid = cb.file.oid;
(*buffer).cb.file.pFile = cb.file.pFile;
m_freeList = new FreeBufList();
m_lruList = new CacheMap();
m_writeList = new CacheMap();
RETURN_ON_ERROR( processCacheMap( m_lruList, buffer, INSERT ) );
m_freeList->erase( it );
for ( int i = 0; i < m_cacheParam->totalBlock; i++ )
{
buffer = new BlockBuffer();
buffer->init();
m_freeList->push_back( buffer );
}
}
return NO_ERROR;
}
/***********************************************************
* DESCRIPTION:
* Insert a buffer to LRU list
* PARAMETERS:
* cb - Comm Block
* lbid - lbid value
* fbo - fbo
* buf - input buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const unsigned char* buf )
{
BlockBuffer* buffer;
vector<BlockBuffer*>::iterator it;
/***********************************************************
* DESCRIPTION:
* Load cache block
* PARAMETERS:
* key - Cache key
* buf - output buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::loadCacheBlock( const CacheKey& key, unsigned char* buf )
{
BlockBuffer* buffer;
CacheMapIt iter;
if ( m_freeList->size() == 0 )
return ERR_FREE_LIST_EMPTY;
iter = m_lruList->find( key );
if( iter != m_lruList->end() )
buffer = iter->second;
else {
iter = m_writeList->find( key );
if( iter != m_writeList->end() )
// make sure flush first if necessary
it = m_freeList->begin();
buffer = *it;
memcpy( (*buffer).block.data, buf, BYTE_PER_BLOCK );
(*buffer).listType = LRU_LIST;
(*buffer).block.lbid = lbid;
(*buffer).block.fbo = fbo;
(*buffer).block.dirty = false;
(*buffer).block.hitCount = 1;
(*buffer).cb.file.oid = cb.file.oid;
(*buffer).cb.file.pFile = cb.file.pFile;
RETURN_ON_ERROR( processCacheMap( m_lruList, buffer, INSERT ) );
m_freeList->erase( it );
return NO_ERROR;
}
/***********************************************************
* DESCRIPTION:
* Load cache block
* PARAMETERS:
* key - Cache key
* buf - output buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::loadCacheBlock( const CacheKey& key, unsigned char* buf )
{
BlockBuffer* buffer;
CacheMapIt iter;
iter = m_lruList->find( key );
if ( iter != m_lruList->end() )
buffer = iter->second;
else
{
iter = m_writeList->find( key );
if ( iter != m_writeList->end() )
buffer = iter->second;
else
else
return ERR_CACHE_KEY_NOT_EXIST;
}
memcpy( buf, (*buffer).block.data, BYTE_PER_BLOCK );
(*buffer).block.hitCount++;
}
return NO_ERROR;
}
memcpy( buf, (*buffer).block.data, BYTE_PER_BLOCK );
(*buffer).block.hitCount++;
/***********************************************************
* DESCRIPTION:
* Modify cache block
* PARAMETERS:
* key - Cache key
* buf - output buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::modifyCacheBlock( const CacheKey& key, const unsigned char* buf )
{
BlockBuffer* buffer;
CacheMapIt iter;
return NO_ERROR;
}
iter = m_lruList->find( key );
if( iter != m_lruList->end() ) {
buffer = iter->second;
(*buffer).listType = WRITE_LIST;
(*buffer).block.dirty = true;
/***********************************************************
* DESCRIPTION:
* Modify cache block
* PARAMETERS:
* key - Cache key
* buf - output buffer
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::modifyCacheBlock( const CacheKey& key, const unsigned char* buf )
{
BlockBuffer* buffer;
CacheMapIt iter;
(*m_writeList)[key] = iter->second;
m_lruList->erase( iter );
iter = m_lruList->find( key );
}
else {
iter = m_writeList->find( key );
if( iter != m_writeList->end() )
if ( iter != m_lruList->end() )
{
buffer = iter->second;
(*buffer).listType = WRITE_LIST;
(*buffer).block.dirty = true;
(*m_writeList)[key] = iter->second;
m_lruList->erase( iter );
}
else
{
iter = m_writeList->find( key );
if ( iter != m_writeList->end() )
buffer = iter->second;
else
else
return ERR_CACHE_KEY_NOT_EXIST;
}
memcpy( (*buffer).block.data, buf, BYTE_PER_BLOCK );
(*buffer).block.hitCount++;
}
return NO_ERROR;
}
memcpy( (*buffer).block.data, buf, BYTE_PER_BLOCK );
(*buffer).block.hitCount++;
/***********************************************************
* DESCRIPTION:
* Print cache list
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Cache::printCacheList()
{
BlockBuffer* buffer;
int i = 0;
return NO_ERROR;
}
if( !m_useCache )
return;
/***********************************************************
* DESCRIPTION:
* Print cache list
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Cache::printCacheList()
{
BlockBuffer* buffer;
int i = 0;
cout << "\nFree List has " << m_freeList->size() << " elements" << endl;
cout << "LRU List has " << m_lruList->size() << " elements" << endl;
for( CacheMapIt it = m_lruList->begin(); it != m_lruList->end(); it++ ) {
buffer = it->second;
cout << "\t[" << i++ << "] key=" << it->first << " listType=" << buffer->listType
<< " oid=" << (*buffer).cb.file.oid << " fbo=" << (*buffer).block.fbo
<< " dirty=" << (*buffer).block.dirty << " hitCount=" << (*buffer).block.hitCount << endl;
}
if ( !m_useCache )
return;
i = 0;
cout << "Write List has " << m_writeList->size() << " elements" << endl;
for( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ ) {
buffer = it->second;
cout << "\t[" << i++ << "] key=" << it->first << " listType=" << buffer->listType
<< " oid=" << (*buffer).cb.file.oid << " fbo=" << (*buffer).block.fbo
<< " dirty=" << (*buffer).block.dirty << " hitCount=" << (*buffer).block.hitCount << endl;
}
}
cout << "\nFree List has " << m_freeList->size() << " elements" << endl;
cout << "LRU List has " << m_lruList->size() << " elements" << endl;
/***********************************************************
* DESCRIPTION:
* Process a buffer in a cache map
* PARAMETERS:
* buffer - block buffer
* opType - insert or delete
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::processCacheMap( CacheMap* map, BlockBuffer* buffer, OpType opType )
{
RETURN_ON_NULL( buffer, ERR_NULL_BLOCK );
CacheMapIt iter;
for ( CacheMapIt it = m_lruList->begin(); it != m_lruList->end(); it++ )
{
buffer = it->second;
cout << "\t[" << i++ << "] key=" << it->first << " listType=" << buffer->listType
<< " oid=" << (*buffer).cb.file.oid << " fbo=" << (*buffer).block.fbo
<< " dirty=" << (*buffer).block.dirty << " hitCount=" << (*buffer).block.hitCount << endl;
}
CacheKey key = getCacheKey( buffer );
iter = map->find( key );
i = 0;
cout << "Write List has " << m_writeList->size() << " elements" << endl;
// only handle insert and delete
if( iter == map->end() ) {
if( opType == INSERT )
for ( CacheMapIt it = m_writeList->begin(); it != m_writeList->end(); it++ )
{
buffer = it->second;
cout << "\t[" << i++ << "] key=" << it->first << " listType=" << buffer->listType
<< " oid=" << (*buffer).cb.file.oid << " fbo=" << (*buffer).block.fbo
<< " dirty=" << (*buffer).block.dirty << " hitCount=" << (*buffer).block.hitCount << endl;
}
}
/***********************************************************
* DESCRIPTION:
* Process a buffer in a cache map
* PARAMETERS:
* buffer - block buffer
* opType - insert or delete
* RETURN:
* NO_ERROR if success, other otherwise
***********************************************************/
const int Cache::processCacheMap( CacheMap* map, BlockBuffer* buffer, OpType opType )
{
RETURN_ON_NULL( buffer, ERR_NULL_BLOCK );
CacheMapIt iter;
CacheKey key = getCacheKey( buffer );
iter = map->find( key );
// only handle insert and delete
if ( iter == map->end() )
{
if ( opType == INSERT )
(*map)[key] = buffer;
else
else
return ERR_CACHE_KEY_NOT_EXIST;
}
else {
if( opType == INSERT )
}
else
{
if ( opType == INSERT )
return ERR_CACHE_KEY_EXIST;
else
else
map->erase( iter );
}
}
return NO_ERROR;
}
return NO_ERROR;
}
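
Taken together, the Cache methods above form a small static API: buffers start on the free list, insertLRUList() moves one into the LRU list, modifyCacheBlock() migrates it to the write list, and flushCache() drains the write list again. The call sequence below is a minimal sketch, assuming the cache header shown next is on the include path and that cb describes an open column file; it is illustrative, not engine code.

#include "we_cache.h"   // assumed header name for the Cache class above

using namespace WriteEngine;

// Illustrative round trip through the cache; return codes follow the
// NO_ERROR convention shown in the diff.
void cacheRoundTrip(CommBlock& cb, uint64_t lbid, uint64_t fbo,
                    unsigned char* blockBuf /* BYTE_PER_BLOCK bytes */)
{
    Cache::init();                 // builds the free list with default sizing
    Cache::setUseCache(true);

    // Free list -> LRU list.
    int rc = Cache::insertLRUList(cb, lbid, fbo, blockBuf);

    // Read back through the cache; each access bumps hitCount.
    CacheKey key = Cache::getCacheKey(cb.file.oid, lbid);

    if (rc == NO_ERROR)
        rc = Cache::loadCacheBlock(key, blockBuf);

    // LRU list -> write list (the buffer is marked dirty).
    if (rc == NO_ERROR)
        rc = Cache::modifyCacheBlock(key, blockBuf);

    Cache::flushCache();           // write list drained back to LRU/free lists
    Cache::freeMemory();
}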

@ -53,132 +53,172 @@ typedef uint64_t CacheKey; /** @brief Key definition
//typedef std::map<CacheKey, BlockBuffer*, std::greater<CacheKey> > CacheMap; /** @brief Cache map */
//typedef CacheMap::iterator CacheMapIt; /** @brief CacheMap iterator */
template<class T>struct hashCacheKey{ };
template<class T>struct hashCacheKey { };
template<> struct hashCacheKey<CacheKey>
{
size_t
operator()(CacheKey __x) const
{ return __x; }
size_t
operator()(CacheKey __x) const
{
return __x;
}
};
struct eqCacheKey
{
bool operator() (const CacheKey k1, const CacheKey k2 ) const
{
return k1 == k2;
}
bool operator() (const CacheKey k1, const CacheKey k2 ) const
{
return k1 == k2;
}
};
//typedef hash_map<Signature, TokenStruc, signatureHash<Signature>, eqSig> DCTNRYHASHMAP;
#if __GNUC__ == 4 && __GNUC_MINOR__ < 2
typedef __gnu_cxx::hash_map<CacheKey, BlockBuffer*, hashCacheKey<CacheKey>, eqCacheKey> CacheMap;
typedef __gnu_cxx::hash_map<CacheKey, BlockBuffer*, hashCacheKey<CacheKey>, eqCacheKey> CacheMap;
#else
typedef std::tr1::unordered_map<CacheKey, BlockBuffer*, hashCacheKey<CacheKey>, eqCacheKey> CacheMap;
typedef std::tr1::unordered_map<CacheKey, BlockBuffer*, hashCacheKey<CacheKey>, eqCacheKey> CacheMap;
#endif
//typedef __gnu_cxx::hash_map<CacheKey, BlockBuffer*> CacheMap;
//typedef __gnu_cxx::hash_map<CacheKey, BlockBuffer*> CacheMap;
typedef CacheMap::iterator CacheMapIt;
//typedef CacheMap LRUBufList; /** @brief Least Recent Used Buffer list */
//typedef CacheMap WriteBufList; /** @brief Write buffer list */
/** Class Cache */
class Cache
class Cache
{
public:
/**
* @brief Constructor
*/
Cache() {}
/**
* @brief Constructor
*/
Cache() {}
/**
* @brief Default Destructor
*/
~Cache() {}
/**
* @brief Default Destructor
*/
~Cache() {}
/**
* @brief Check whether cache key exists
*/
static const bool cacheKeyExist( CacheMap* map, const OID oid, const uint64_t lbid ) { CacheKey key = getCacheKey( oid, lbid ); return map->find(key) == map->end() ? false: true; }
static const bool cacheKeyExist( CacheMap* map, BlockBuffer* buffer ) { return cacheKeyExist( map, (*buffer).cb.file.oid, (*buffer).block.lbid ); }
static const bool cacheKeyExist( const OID oid, const uint64_t lbid ) { return cacheKeyExist( m_lruList, oid, lbid ) || cacheKeyExist( m_writeList, oid, lbid ); }
/**
* @brief Check whether cache key exists
*/
static const bool cacheKeyExist( CacheMap* map, const OID oid, const uint64_t lbid )
{
CacheKey key = getCacheKey( oid, lbid );
return map->find(key) == map->end() ? false : true;
}
static const bool cacheKeyExist( CacheMap* map, BlockBuffer* buffer )
{
return cacheKeyExist( map, (*buffer).cb.file.oid, (*buffer).block.lbid );
}
static const bool cacheKeyExist( const OID oid, const uint64_t lbid )
{
return cacheKeyExist( m_lruList, oid, lbid ) || cacheKeyExist( m_writeList, oid, lbid );
}
/**
* @brief Clear the buffer
*/
EXPORT static void clear();
/**
* @brief Clear the buffer
*/
EXPORT static void clear();
/**
* @brief Free the buffer memory
*/
EXPORT static void freeMemory();
/**
* @brief Free the buffer memory
*/
EXPORT static void freeMemory();
/**
* @brief Flush the write cache
*/
EXPORT static const int flushCache();
/**
* @brief Flush the write cache
*/
EXPORT static const int flushCache();
/**
* @brief Get the cache key
*/
static CacheKey getCacheKey( const OID oid, const uint64_t lbid ) { CacheKey key = lbid; /*Convertor::int2Str( oid ) + "|" + Convertor::int2Str(lbid)*/; return key; }
static CacheKey getCacheKey( const BlockBuffer* buffer ) { return getCacheKey( (*buffer).cb.file.oid, (*buffer).block.lbid ); }
/**
* @brief Get the cache key
*/
static CacheKey getCacheKey( const OID oid, const uint64_t lbid )
{
CacheKey key = lbid; /*Convertor::int2Str( oid ) + "|" + Convertor::int2Str(lbid)*/;
return key;
}
static CacheKey getCacheKey( const BlockBuffer* buffer )
{
return getCacheKey( (*buffer).cb.file.oid, (*buffer).block.lbid );
}
EXPORT static const int getListSize( const CacheListType listType );
EXPORT static const int getListSize( const CacheListType listType );
/**
* @brief Init the buffers
*/
EXPORT static void init( const int totalBlock, const int chkPoint, const int pctFree );
static void init() { init( DEFAULT_CACHE_BLOCK, DEFAULT_CHK_INTERVAL, DEFAULT_CACHE_PCT_FREE ); }
/**
* @brief Init the buffers
*/
EXPORT static void init( const int totalBlock, const int chkPoint, const int pctFree );
static void init()
{
init( DEFAULT_CACHE_BLOCK, DEFAULT_CHK_INTERVAL, DEFAULT_CACHE_PCT_FREE );
}
/**
* @brief Insert into LRU list
*/
EXPORT static const int insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const unsigned char* buf );
static const int insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const DataBlock& block ) { return insertLRUList( cb, lbid, fbo, block.data ); }
/**
* @brief Insert into LRU list
*/
EXPORT static const int insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const unsigned char* buf );
static const int insertLRUList( CommBlock& cb, const uint64_t lbid, const uint64_t fbo, const DataBlock& block )
{
return insertLRUList( cb, lbid, fbo, block.data );
}
/**
* @brief Insert into Write list
*/
/**
* @brief Insert into Write list
*/
// static const int insertWriteList( const CacheKey& key );
/**
* @brief Load cache block to a buffer
*/
static const int loadCacheBlock( const CacheKey& key, DataBlock& block ) { return loadCacheBlock( key, block.data ); }
EXPORT static const int loadCacheBlock( const CacheKey& key, unsigned char* buf );
/**
* @brief Load cache block to a buffer
*/
static const int loadCacheBlock( const CacheKey& key, DataBlock& block )
{
return loadCacheBlock( key, block.data );
}
EXPORT static const int loadCacheBlock( const CacheKey& key, unsigned char* buf );
/**
* @brief Modify a cache block
*/
static const int modifyCacheBlock( const CacheKey& key, const DataBlock& block ) { return modifyCacheBlock( key, block.data ); }
EXPORT static const int modifyCacheBlock( const CacheKey& key, const unsigned char* buf );
/**
* @brief Modify a cache block
*/
static const int modifyCacheBlock( const CacheKey& key, const DataBlock& block )
{
return modifyCacheBlock( key, block.data );
}
EXPORT static const int modifyCacheBlock( const CacheKey& key, const unsigned char* buf );
/**
* @brief Print
*/
EXPORT static void printCacheMapList( const CacheMap* map );
EXPORT static void printCacheList();
/**
* @brief Print
*/
EXPORT static void printCacheMapList( const CacheMap* map );
EXPORT static void printCacheList();
/**
* @brief Insert/Delete an element in cache map
*/
EXPORT static const int processCacheMap( CacheMap* map, BlockBuffer* buffer, OpType opType );
/**
* @brief Insert/Delete an element in cache map
*/
EXPORT static const int processCacheMap( CacheMap* map, BlockBuffer* buffer, OpType opType );
// accessory
static const int getTotalBlock() { return m_cacheParam->totalBlock; }
static const bool getUseCache() { return m_useCache; }
static void setUseCache( const bool flag ) { m_useCache = flag; }
// accessory
static const int getTotalBlock()
{
return m_cacheParam->totalBlock;
}
static const bool getUseCache()
{
return m_useCache;
}
static void setUseCache( const bool flag )
{
m_useCache = flag;
}
static CacheControl* m_cacheParam; // Cache parameters
static FreeBufList* m_freeList; // free buffer list
static CacheMap* m_lruList; // LRU buffer list
static CacheMap* m_writeList; // Write buffer list
static CacheControl* m_cacheParam; // Cache parameters
static FreeBufList* m_freeList; // free buffer list
static CacheMap* m_lruList; // LRU buffer list
static CacheMap* m_writeList; // Write buffer list
#if defined(_MSC_VER) && !defined(WRITEENGINE_DLLEXPORT)
__declspec(dllimport)
__declspec(dllimport)
#endif
EXPORT static bool m_useCache; // Use cache flag
EXPORT static bool m_useCache; // Use cache flag
private:
};

File diff suppressed because it is too large.
@ -68,10 +68,10 @@ const int UNCOMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::UNCOMPRESSED
const int COMPRESSED_FILE_HEADER_UNIT = compress::IDBCompressInterface::HDR_BUF_LEN;
// assume UNCOMPRESSED_CHUNK_SIZE > 0xBFFF (49151), 8 * 1024 bytes padding
const int COMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::maxCompressedSize(UNCOMPRESSED_CHUNK_SIZE) + 64+3 + 8*1024;
const int COMPRESSED_CHUNK_SIZE = compress::IDBCompressInterface::maxCompressedSize(UNCOMPRESSED_CHUNK_SIZE) + 64 + 3 + 8 * 1024;
const int BLOCKS_IN_CHUNK = UNCOMPRESSED_CHUNK_SIZE / BYTE_PER_BLOCK;
const int MAXOFFSET_PER_CHUNK = 511*BYTE_PER_BLOCK;
const int MAXOFFSET_PER_CHUNK = 511 * BYTE_PER_BLOCK;
// chunk information
typedef int64_t ChunkId;
@ -83,19 +83,22 @@ struct ChunkData
bool fWriteToFile;
ChunkData(ChunkId id = 0) : fChunkId(id), fLenUnCompressed(0), fWriteToFile(false) {}
bool operator < (const ChunkData& rhs) const { return fChunkId < rhs.fChunkId; }
bool operator < (const ChunkData& rhs) const
{
return fChunkId < rhs.fChunkId;
}
};
// compressed DB file header information
struct CompFileHeader
{
char fHeaderData[COMPRESSED_FILE_HEADER_UNIT * 2];
char *fControlData;
char *fPtrSection;
char* fControlData;
char* fPtrSection;
boost::scoped_array<char> fLongPtrSectData;
CompFileHeader() :
fControlData(fHeaderData), fPtrSection(fHeaderData+COMPRESSED_FILE_HEADER_UNIT) {}
fControlData(fHeaderData), fPtrSection(fHeaderData + COMPRESSED_FILE_HEADER_UNIT) {}
};
@ -111,15 +114,19 @@ struct FileID
fFid(f), fDbRoot(r), fPartition(p), fSegment(s) {}
bool operator < (const FileID& rhs) const
{ return (
(fFid < rhs.fFid) ||
(fFid == rhs.fFid && fDbRoot < rhs.fDbRoot) ||
(fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition < rhs.fPartition) ||
(fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition == rhs.fPartition && fSegment < rhs.fSegment)); }
{
return (
(fFid < rhs.fFid) ||
(fFid == rhs.fFid && fDbRoot < rhs.fDbRoot) ||
(fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition < rhs.fPartition) ||
(fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition == rhs.fPartition && fSegment < rhs.fSegment));
}
bool operator == (const FileID& rhs) const
{ return (
fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition == rhs.fPartition && fSegment == rhs.fSegment); }
{
return (
fFid == rhs.fFid && fDbRoot == rhs.fDbRoot && fPartition == rhs.fPartition && fSegment == rhs.fSegment);
}
};
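
Because FileID supplies both operator< (a lexicographic ordering over fFid, fDbRoot, fPartition and fSegment) and operator==, it works directly as a key for ordered containers. A small, purely illustrative example, assuming this header is included and using made-up OID/partition values:

#include <map>
#include <string>

// Hypothetical use of FileID as an ordered map key.
void fileIdOrderingExample()
{
    std::map<FileID, std::string> files;

    files[FileID(3001, 1, 0, 0)] = "OID 3001, dbroot 1, part 0, seg 0";
    files[FileID(3001, 1, 0, 1)] = "same column, next segment";
    files[FileID(3002, 1, 0, 0)] = "higher OID sorts after both";

    // Iteration visits the three entries in exactly the order listed above.
}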
@ -129,8 +136,8 @@ class CompFileData
{
public:
CompFileData(const FileID& id, const FID& fid, const execplan::CalpontSystemCatalog::ColDataType colDataType, int colWidth) :
fFileID(id), fFid(fid), fColDataType(colDataType), fColWidth(colWidth), fDctnryCol(false),
fFilePtr(NULL), fIoBSize(0) {}
fFileID(id), fFid(fid), fColDataType(colDataType), fColWidth(colWidth), fDctnryCol(false),
fFilePtr(NULL), fIoBSize(0) {}
ChunkData* findChunk(int64_t cid) const;
@ -164,34 +171,34 @@ public:
// @brief Retrieve a file pointer in the chunk manager.
// for column file
IDBDataFile* getFilePtr(const Column& column,
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
bool useTmpSuffix) const;
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
bool useTmpSuffix) const;
// @brief Retrieve a file pointer in the chunk manager.
// for dictionary file
IDBDataFile* getFilePtr(const FID& fid,
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
bool useTmpSuffix) const;
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
bool useTmpSuffix) const;
// @brief Create a compressed dictionary file with an appropriate header.
IDBDataFile* createDctnryFile(const FID& fid,
int64_t width,
uint16_t root,
uint32_t partition,
uint16_t segment,
const char* filename,
const char* mode,
int size);
int64_t width,
uint16_t root,
uint32_t partition,
uint16_t segment,
const char* filename,
const char* mode,
int size);
// @brief Read a block from pFile at offset fbo.
// The data may copied from memory if the chunk it belongs to is already available.
@ -202,10 +209,10 @@ public:
int saveBlock(IDBDataFile* pFile, const unsigned char* writeBuf, uint64_t fbo);
// @brief Write all active chunks to disk, and reset all repository.
EXPORT int flushChunks(int rc, const std::map<FID, FID> & columOids);
EXPORT int flushChunks(int rc, const std::map<FID, FID>& columOids);
// @brief Reset all repository without writing anything to disk.
void cleanUp(const std::map<FID, FID> & columOids);
void cleanUp(const std::map<FID, FID>& columOids);
// @brief Expand an initial column, not dictionary, extent to a full extent.
int expandAbbrevColumnExtent(IDBDataFile* pFile, uint64_t emptyVal, int width);
@ -232,44 +239,59 @@ public:
// @brief Control the number of active chunks being stored in memory
void setMaxActiveChunkNum(unsigned int maxActiveChunkNum)
{ fMaxActiveChunkNum = maxActiveChunkNum; }
{
fMaxActiveChunkNum = maxActiveChunkNum;
}
// @brief Use this flag to avoid logging and backing up chunks, tmp files.
void setBulkFlag(bool isBulkLoad)
{ fIsBulkLoad = isBulkLoad; }
{
fIsBulkLoad = isBulkLoad;
}
// @brief Use this flag to flush chunk when is full.
void setIsInsert(bool isInsert) { fIsInsert = isInsert; }
bool getIsInsert() { return fIsInsert; }
void setIsInsert(bool isInsert)
{
fIsInsert = isInsert;
}
bool getIsInsert()
{
return fIsInsert;
}
void setTransId(const TxnID& transId) { fTransId = transId; }
void setTransId(const TxnID& transId)
{
fTransId = transId;
}
// @brief bug5504, Use non transactional DML for InfiniDB with HDFS
EXPORT int startTransaction(const TxnID& transId) const;
EXPORT int confirmTransaction(const TxnID& transId) const;
EXPORT int endTransaction(const TxnID& transId, bool success) const;
// @brief Use this flag to fix bad chunk.
// @brief Use this flag to fix bad chunk.
void setFixFlag(bool isFix)
{ fIsFix = isFix; }
{
fIsFix = isFix;
}
EXPORT int checkFixLastDictChunk(const FID& fid,
uint16_t root,
uint32_t partition,
uint16_t segment);
EXPORT int checkFixLastDictChunk(const FID& fid,
uint16_t root,
uint32_t partition,
uint16_t segment);
protected:
// @brief Retrieve pointer to a compressed DB file.
CompFileData* getFileData(const FID& fid,
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
const execplan::CalpontSystemCatalog::ColDataType colDataType,
int colWidth,
bool useTmpSuffix,
bool dictnry = false) const;
uint16_t root,
uint32_t partition,
uint16_t segment,
std::string& filename,
const char* mode,
int size,
const execplan::CalpontSystemCatalog::ColDataType colDataType,
int colWidth,
bool useTmpSuffix,
bool dictnry = false) const;
// @brief Retrieve a chunk of pFile from disk.
int fetchChunkFromFile(IDBDataFile* pFile, int64_t id, ChunkData*& chunkData);
@ -288,7 +310,7 @@ protected:
// @brief open a compressed DB file.
int openFile(CompFileData* fileData, const char* mode, int colWidth,
bool useTmpSuffix, int ln) const;
bool useTmpSuffix, int ln) const;
// @brief set offset in a compressed DB file from beginning.
int setFileOffset(IDBDataFile* pFile, const std::string& fileName, off64_t offset, int ln) const;
@ -316,12 +338,12 @@ protected:
int verifyChunksAfterRealloc(CompFileData* fileData);
// @brief log a message to the syslog
void logMessage(int code, int level, int lineNum, int fromLine=-1) const;
void logMessage(int code, int level, int lineNum, int fromLine = -1) const;
void logMessage(const std::string& msg, int level) const;
// @brief Write a DML recovery log
int writeLog(TxnID txnId, std::string backUpFileType, std::string filename,
std::string &aDMLLogFileName, int64_t size=0, int64_t offset=0) const;
std::string& aDMLLogFileName, int64_t size = 0, int64_t offset = 0) const;
// @brief remove DML recovery logs
int removeBackups(TxnID txnId);
@ -350,8 +372,8 @@ protected:
TxnID fTransId;
int fLocalModuleId;
idbdatafile::IDBFileSystem& fFs;
bool fIsFix;
bool fIsFix;
private:
};
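
The constants at the top of this header pin down the chunk geometry: UNCOMPRESSED_CHUNK_SIZE bytes of raw data per chunk, BLOCKS_IN_CHUNK blocks per chunk, and (per MAXOFFSET_PER_CHUNK = 511 * BYTE_PER_BLOCK) a last addressable block offset of 511 within a chunk. The arithmetic below is an assumption for illustration only, with assumed values of 8 KiB blocks and 4 MiB uncompressed chunks; ChunkManager's own block-to-chunk lookup is not shown in this diff.

#include <cstdint>

const int64_t kBytePerBlock          = 8192;                 // assumed BYTE_PER_BLOCK
const int64_t kUncompressedChunkSize = 512 * kBytePerBlock;  // assumed, consistent with MAXOFFSET_PER_CHUNK above
const int64_t kBlocksInChunk         = kUncompressedChunkSize / kBytePerBlock;

struct ChunkAddress
{
    int64_t chunkId;        // which chunk of the segment file holds the block
    int64_t offsetInChunk;  // byte offset of the block within the uncompressed chunk
};

// Hypothetical helper showing how a file block offset (fbo) maps onto the
// chunked layout implied by the constants above.
ChunkAddress locateBlock(uint64_t fbo)
{
    ChunkAddress a;
    a.chunkId       = static_cast<int64_t>(fbo / kBlocksInChunk);
    a.offsetInChunk = static_cast<int64_t>(fbo % kBlocksInChunk) * kBytePerBlock;
    return a;
}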

@ -41,42 +41,42 @@ using namespace idbdatafile;
namespace WriteEngine
{
const int DEFAULT_WAIT_PERIOD = 10;
const unsigned DEFAULT_FILES_PER_COLUMN_PARTITION = 4;
const unsigned DEFAULT_EXTENTS_PER_SEGMENT_FILE = 2;
const int DEFAULT_BULK_PROCESS_PRIORITY = -1;
const unsigned DEFAULT_MAX_FILESYSTEM_DISK_USAGE = 98; // allow 98% full
const unsigned DEFAULT_COMPRESSED_PADDING_BLKS = 1;
const int DEFAULT_LOCAL_MODULE_ID = 1;
const bool DEFAULT_PARENT_OAM = true;
const char* DEFAULT_LOCAL_MODULE_TYPE = "pm";
const int DEFAULT_WAIT_PERIOD = 10;
const unsigned DEFAULT_FILES_PER_COLUMN_PARTITION = 4;
const unsigned DEFAULT_EXTENTS_PER_SEGMENT_FILE = 2;
const int DEFAULT_BULK_PROCESS_PRIORITY = -1;
const unsigned DEFAULT_MAX_FILESYSTEM_DISK_USAGE = 98; // allow 98% full
const unsigned DEFAULT_COMPRESSED_PADDING_BLKS = 1;
const int DEFAULT_LOCAL_MODULE_ID = 1;
const bool DEFAULT_PARENT_OAM = true;
const char* DEFAULT_LOCAL_MODULE_TYPE = "pm";
int Config::m_dbRootCount = 0;
Config::strvec_t Config::m_dbRootPath;
Config::intstrmap_t Config::m_dbRootPathMap;
Config::uint16vec_t Config::m_dbRootId;
string Config::m_bulkRoot;
int Config::m_dbRootCount = 0;
Config::strvec_t Config::m_dbRootPath;
Config::intstrmap_t Config::m_dbRootPathMap;
Config::uint16vec_t Config::m_dbRootId;
string Config::m_bulkRoot;
unsigned long Config::fDBRootChangeCount = 0;
time_t Config::fCacheTime = 0;
boost::mutex Config::fCacheLock;
unsigned long Config::fDBRootChangeCount = 0;
time_t Config::fCacheTime = 0;
boost::mutex Config::fCacheLock;
#ifdef SHARED_NOTHING_DEMO_2
boost::mutex Config::m_bulkRoot_lk;
boost::mutex Config::m_bulkRoot_lk;
#endif
int Config::m_WaitPeriod = DEFAULT_WAIT_PERIOD;
unsigned Config::m_FilesPerColumnPartition =
DEFAULT_FILES_PER_COLUMN_PARTITION;
unsigned Config::m_ExtentsPerSegmentFile =
DEFAULT_EXTENTS_PER_SEGMENT_FILE;
int Config::m_BulkProcessPriority = DEFAULT_BULK_PROCESS_PRIORITY;
string Config::m_BulkRollbackDir;
unsigned Config::m_MaxFileSystemDiskUsage =
DEFAULT_MAX_FILESYSTEM_DISK_USAGE;
unsigned Config::m_NumCompressedPadBlks =DEFAULT_COMPRESSED_PADDING_BLKS;
bool Config::m_ParentOAMModuleFlag = DEFAULT_PARENT_OAM;
string Config::m_LocalModuleType;
int Config::m_LocalModuleID = DEFAULT_LOCAL_MODULE_ID;
string Config::m_VersionBufferDir;
int Config::m_WaitPeriod = DEFAULT_WAIT_PERIOD;
unsigned Config::m_FilesPerColumnPartition =
DEFAULT_FILES_PER_COLUMN_PARTITION;
unsigned Config::m_ExtentsPerSegmentFile =
DEFAULT_EXTENTS_PER_SEGMENT_FILE;
int Config::m_BulkProcessPriority = DEFAULT_BULK_PROCESS_PRIORITY;
string Config::m_BulkRollbackDir;
unsigned Config::m_MaxFileSystemDiskUsage =
DEFAULT_MAX_FILESYSTEM_DISK_USAGE;
unsigned Config::m_NumCompressedPadBlks = DEFAULT_COMPRESSED_PADDING_BLKS;
bool Config::m_ParentOAMModuleFlag = DEFAULT_PARENT_OAM;
string Config::m_LocalModuleType;
int Config::m_LocalModuleID = DEFAULT_LOCAL_MODULE_ID;
string Config::m_VersionBufferDir;
/*******************************************************************************
* DESCRIPTION:
@ -117,13 +117,14 @@ void Config::checkReload( )
// Initialize bulk root directory
//--------------------------------------------------------------------------
m_bulkRoot = cf->getConfig("WriteEngine", "BulkRoot");
if ( m_bulkRoot.length() == 0 )
{
m_bulkRoot = startup::StartUp::installDir();
#ifndef _MSC_VER
m_bulkRoot += "/data";
m_bulkRoot += "/data";
#endif
m_bulkRoot += "/bulk";
m_bulkRoot += "/bulk";
}
// Get latest Columnstore.xml timestamp after first access forced a reload
@ -134,15 +135,17 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
m_WaitPeriod = DEFAULT_WAIT_PERIOD;
string waitPeriodStr = cf->getConfig("SystemConfig", "WaitPeriod");
if ( waitPeriodStr.length() != 0 )
m_WaitPeriod = static_cast<int>(config::Config::fromText(
waitPeriodStr));
waitPeriodStr));
//--------------------------------------------------------------------------
// Initialize files per column partition
//--------------------------------------------------------------------------
m_FilesPerColumnPartition = DEFAULT_FILES_PER_COLUMN_PARTITION;
string fpc = cf->getConfig("ExtentMap", "FilesPerColumnPartition");
if ( fpc.length() != 0 )
m_FilesPerColumnPartition = cf->uFromText(fpc);
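
Every setting read by Config::checkReload() follows the same read-or-default pattern seen here for FilesPerColumnPartition: fetch the string, keep the compiled-in default when it is absent, otherwise convert it. A standalone sketch of that pattern (the helper name and conversion are illustrative; the engine uses config::Config::uFromText()/fromText()):

#include <string>
#include <cstdlib>

// Illustrative read-or-default helper.
unsigned readOrDefault(const std::string& value, unsigned defaultValue)
{
    if (value.empty())
        return defaultValue;

    return static_cast<unsigned>(std::strtoul(value.c_str(), 0, 10));
}

// e.g. m_FilesPerColumnPartition =
//          readOrDefault(fpc, DEFAULT_FILES_PER_COLUMN_PARTITION);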
@ -151,6 +154,7 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
m_ExtentsPerSegmentFile = DEFAULT_EXTENTS_PER_SEGMENT_FILE;
string epsf = cf->getConfig("ExtentMap", "ExtentsPerSegmentFile");
if ( epsf.length() != 0 )
m_ExtentsPerSegmentFile = cf->uFromText(epsf);
@ -159,11 +163,12 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
m_BulkProcessPriority = DEFAULT_BULK_PROCESS_PRIORITY;
string prior = cf->getConfig("WriteEngine", "Priority");
if ( prior.length() != 0 )
{
int initialBPP = cf->fromText(prior);
// config file priority is 40..1 (highest..lowest)
// config file priority is 40..1 (highest..lowest)
// convert config file value to setpriority(2) value(-20..19, -1 is the
// default)
if (initialBPP > 0)
@ -181,6 +186,7 @@ void Config::checkReload( )
// that sets m_bulkRoot.
//--------------------------------------------------------------------------
m_BulkRollbackDir = cf->getConfig("WriteEngine", "BulkRollbackDir");
if (m_BulkRollbackDir.length() == 0)
{
m_BulkRollbackDir.assign( m_bulkRoot );
@ -192,8 +198,10 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
m_MaxFileSystemDiskUsage = DEFAULT_MAX_FILESYSTEM_DISK_USAGE;
string usg = cf->getConfig("WriteEngine", "MaxFileSystemDiskUsagePct");
if ( usg.length() != 0 )
m_MaxFileSystemDiskUsage = cf->uFromText(usg);
if (m_MaxFileSystemDiskUsage > 100)
m_MaxFileSystemDiskUsage = DEFAULT_MAX_FILESYSTEM_DISK_USAGE;
@ -202,6 +210,7 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
m_NumCompressedPadBlks = DEFAULT_COMPRESSED_PADDING_BLKS;
string ncpb = cf->getConfig("WriteEngine", "CompressedPaddingBlocks");
if ( ncpb.length() != 0 )
m_NumCompressedPadBlks = cf->uFromText(ncpb);
@ -211,10 +220,11 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
bool idblog = false;
string idblogstr = cf->getConfig("SystemConfig", "DataFileLog");
if ( idblogstr.length() != 0 )
{
boost::to_upper(idblogstr);
idblog = ( idblogstr == "ON" );
boost::to_upper(idblogstr);
idblog = ( idblogstr == "ON" );
}
//--------------------------------------------------------------------------
@ -222,6 +232,7 @@ void Config::checkReload( )
// then the system will use HDFS for all IDB data files
//--------------------------------------------------------------------------
string fsplugin = cf->getConfig("SystemConfig", "DataFilePlugin");
if ( fsplugin.length() != 0 )
{
IDBPolicy::installPlugin(fsplugin);
@ -234,6 +245,7 @@ void Config::checkReload( )
bool bUseRdwrMemBuffer = true; // If true, use in-memory buffering, else use file buffering
int64_t hdfsRdwrBufferMaxSize = 0;
string strBufferMaxSize = cf->getConfig("SystemConfig", "hdfsRdwrBufferMaxSize");
if (strBufferMaxSize.length() == 0)
{
// Default is use membuf with no maximum size.
@ -242,6 +254,7 @@ void Config::checkReload( )
else
{
hdfsRdwrBufferMaxSize = static_cast<int64_t>(cf->uFromText(strBufferMaxSize));
if ( hdfsRdwrBufferMaxSize == 0 )
{
// If we're given a size of 0, turn off membuffering.
@ -251,15 +264,16 @@ void Config::checkReload( )
// Directory in which to place file buffer temporary files.
string hdfsRdwrScratch = cf->getConfig("SystemConfig", "hdfsRdwrScratch");
if ( hdfsRdwrScratch.length() == 0 )
{
hdfsRdwrScratch = "/tmp/hdfsscratch";
}
IDBPolicy::init( idblog, bUseRdwrMemBuffer, hdfsRdwrScratch, hdfsRdwrBufferMaxSize );
IDBPolicy::init( idblog, bUseRdwrMemBuffer, hdfsRdwrScratch, hdfsRdwrBufferMaxSize );
#endif
IDBPolicy::configIDBPolicy();
IDBPolicy::configIDBPolicy();
//--------------------------------------------------------------------------
// Initialize Parent OAM Module flag
@ -268,13 +282,16 @@ void Config::checkReload( )
//--------------------------------------------------------------------------
oam::Oam oam;
oam::oamModuleInfo_t t;
try {
try
{
t = oam.getModuleInfo();
m_ParentOAMModuleFlag = boost::get<4>(t);
m_LocalModuleType = boost::get<1>(t);
m_LocalModuleID = boost::get<2>(t);
}
catch (exception&) {
catch (exception&)
{
m_ParentOAMModuleFlag = DEFAULT_PARENT_OAM;
m_LocalModuleType.assign( DEFAULT_LOCAL_MODULE_TYPE );
m_LocalModuleID = DEFAULT_LOCAL_MODULE_ID;
@ -284,6 +301,7 @@ void Config::checkReload( )
// Initialize Version Buffer
//--------------------------------------------------------------------------
m_VersionBufferDir = cf->getConfig("SystemConfig", "DBRMRoot");
if ( m_VersionBufferDir.length() == 0 )
{
#ifdef _MSC_VER
@ -310,25 +328,28 @@ void Config::checkReload( )
if (m_LocalModuleType == "pm")
{
oam::DBRootConfigList oamRootList;
try {
try
{
oam.getPmDbrootConfig( m_LocalModuleID, oamRootList );
std::sort( oamRootList.begin(), oamRootList.end() );
m_dbRootCount = oamRootList.size();
for (unsigned int idx=0; idx<oamRootList.size(); idx++)
for (unsigned int idx = 0; idx < oamRootList.size(); idx++)
{
ostringstream oss;
oss << "DBRoot" << oamRootList[idx];
std::string DbRootPath =
std::string DbRootPath =
cf->getConfig("SystemConfig", oss.str());
m_dbRootPath.push_back( DbRootPath );
m_dbRootPathMap[ oamRootList[idx] ] = DbRootPath;
m_dbRootId.push_back( oamRootList[idx] );
}
}
catch (exception&) {
catch (exception&)
{
m_dbRootCount = 0;
}
}
@ -341,7 +362,7 @@ void Config::checkReload( )
if (!bFirstLoad)
{
if ((dbRootIdPrevious != m_dbRootId) ||
(dbRootPathPrevious != m_dbRootPath))
(dbRootPathPrevious != m_dbRootPath))
{
fDBRootChangeCount++;
}
@ -380,7 +401,7 @@ size_t Config::DBRootCount()
/*******************************************************************************
* DESCRIPTION:
* Get db root
* Get db root
* PARAMETERS:
* idx - Index of the DBRootn entry to fetch (0 fetches DBRoot[0], etc.)
* RETURN:
@ -419,7 +440,7 @@ void Config::getDBRootPathList( std::vector<std::string>& dbRootPathList )
/*******************************************************************************
* DESCRIPTION:
* Get db root
* Get db root
* PARAMETERS:
* num - DBRootN entry to fetch (1 fetches DBRoot1, etc.)
* RETURN:
@ -431,13 +452,14 @@ std::string Config::getDBRootByNum(unsigned num)
checkReload( );
Config::intstrmap_t::const_iterator iter = m_dbRootPathMap.find( num );
if (iter == m_dbRootPathMap.end())
{
std::string emptyResult;
return emptyResult;
}
return iter->second;
return iter->second;
}
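
A DBRoot number that is not assigned to the local module simply yields an empty string rather than an error, so callers can probe for it directly. A minimal usage sketch (the DBRoot number 3 below is only an illustration):

    std::string path = WriteEngine::Config::getDBRootByNum(3);
    if (path.empty())
        std::cout << "DBRoot3 is not assigned to this module" << std::endl;
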
/*******************************************************************************
@ -458,13 +480,13 @@ void Config::getRootIdList( std::vector<uint16_t>& rootIds )
/*******************************************************************************
* DESCRIPTION:
* Get bulk root
* Get bulk root
* PARAMETERS:
* none
* none
* RETURN:
* the configured bulk root path
******************************************************************************/
std::string Config::getBulkRoot()
std::string Config::getBulkRoot()
{
boost::mutex::scoped_lock lk(fCacheLock);
checkReload( );
@ -473,13 +495,13 @@ std::string Config::getBulkRoot()
}
#ifdef SHARED_NOTHING_DEMO_2
void Config::getSharedNothingRoot(char *ret)
void Config::getSharedNothingRoot(char* ret)
{
string root;
boost::mutex::scoped_lock lk(m_bulkRoot_lk);
root = config::Config::makeConfig()->getConfig(
"WriteEngine", "SharedNothingRoot");
"WriteEngine", "SharedNothingRoot");
strncpy(ret, root.c_str(), FILE_NAME_SIZE);
}
#endif
@ -530,7 +552,7 @@ unsigned Config::getExtentsPerSegmentFile()
checkReload( );
return m_ExtentsPerSegmentFile;
}
}
/*******************************************************************************
* DESCRIPTION:
@ -559,7 +581,7 @@ int Config::getBulkProcessPriority()
* DESCRIPTION:
* Get bulk rollback directory path.
* PARAMETERS:
* none
* none
******************************************************************************/
std::string Config::getBulkRollbackDir()
{
@ -573,7 +595,7 @@ std::string Config::getBulkRollbackDir()
* DESCRIPTION:
* Get Max percentage of allowable file system disk usage for each DBRoot
* PARAMETERS:
* none
* none
******************************************************************************/
unsigned Config::getMaxFileSystemDiskUsage()
{
@ -588,7 +610,7 @@ unsigned Config::getMaxFileSystemDiskUsage()
* Get number of blocks to use in padding each compressed chunk (only
* applies to compressed columns).
* PARAMETERS:
* none
* none
******************************************************************************/
unsigned Config::getNumCompressedPadBlks()
{
@ -642,13 +664,13 @@ uint16_t Config::getLocalModuleID()
/*******************************************************************************
* DESCRIPTION:
* Get version buffer root
* Get version buffer root
* PARAMETERS:
* none
* none
* RETURN:
* the configured version buffer root path
******************************************************************************/
std::string Config::getVBRoot()
std::string Config::getVBRoot()
{
boost::mutex::scoped_lock lk(fCacheLock);
checkReload( );
@ -668,6 +690,7 @@ std::string Config::getVBRoot()
bool Config::hasLocalDBRootListChanged()
{
boost::mutex::scoped_lock lk(fCacheLock);
if (fDBRootChangeCount > 0)
{
fDBRootChangeCount = 0;

View File

@ -44,7 +44,7 @@ namespace WriteEngine
{
/** Class Config */
class Config
class Config
{
public:
/**
@ -82,7 +82,7 @@ public:
EXPORT static void getRootIdList( std::vector<uint16_t>& dbRootIds );
#ifdef SHARED_NOTHING_DEMO_2
EXPORT static void getSharedNothingRoot(char *); // pass in a char[FILE_NAME_SIZE]
EXPORT static void getSharedNothingRoot(char*); // pass in a char[FILE_NAME_SIZE]
#endif
/**
@ -99,7 +99,7 @@ public:
* @brief Wait Period
*/
EXPORT static int getWaitPeriod();
/**
* @brief FilesPerColumnPartition
*/
@ -150,7 +150,7 @@ public:
* @brief Version Buffer root
*/
EXPORT static std::string getVBRoot();
/**
* @brief Cache the config parameters locally
* Initialize Config cache. Cache will be updated as needed.
@ -165,7 +165,7 @@ public:
private:
typedef std::vector<std::string> strvec_t;
typedef std::map<int,std::string> intstrmap_t;
typedef std::map<int, std::string> intstrmap_t;
typedef std::vector<uint16_t> uint16vec_t;
static void checkReload();
@ -191,7 +191,7 @@ private:
static bool m_ParentOAMModuleFlag; // are we running on parent PM
static std::string m_LocalModuleType; // local node type (ex: "pm")
static int m_LocalModuleID; // local node id (ex: 1 )
static std::string m_VersionBufferDir; // Version buffer directory
static std::string m_VersionBufferDir; // Version buffer directory
};
} //end of namespace

View File

@ -35,7 +35,7 @@
namespace
{
const int BUF_SIZE = 1024; // size of buffer used to read meta data records
const int BUF_SIZE = 1024; // size of buffer used to read meta data records
}
namespace WriteEngine
@ -50,8 +50,8 @@ namespace WriteEngine
//------------------------------------------------------------------------------
ConfirmHdfsDbFile::ConfirmHdfsDbFile() :
fFs( (idbdatafile::IDBPolicy::useHdfs()) ?
idbdatafile::IDBFileSystem::getFs(idbdatafile::IDBDataFile::HDFS) :
idbdatafile::IDBFileSystem::getFs(idbdatafile::IDBDataFile::BUFFERED))
idbdatafile::IDBFileSystem::getFs(idbdatafile::IDBDataFile::HDFS) :
idbdatafile::IDBFileSystem::getFs(idbdatafile::IDBDataFile::BUFFERED))
{
}
@ -92,6 +92,7 @@ int ConfirmHdfsDbFile::confirmDbFileChange(
// add safety checks, just in case
std::string tmp(filename + ".tmp");
if (!fFs.exists(tmp.c_str())) // file already swapped
return rc;
@ -108,8 +109,9 @@ int ConfirmHdfsDbFile::confirmDbFileChange(
// remove the old orig if it exists
std::string orig(filename + ".orig");
errno = 0;
if ((fFs.exists(orig.c_str())) &&
(fFs.remove(orig.c_str())) != 0)
(fFs.remove(orig.c_str())) != 0)
{
int errNum = errno;
std::ostringstream oss;
@ -122,6 +124,7 @@ int ConfirmHdfsDbFile::confirmDbFileChange(
// backup the original
errno = 0;
if (fFs.rename(filename.c_str(), orig.c_str()) != 0)
{
int errNum = errno;
@ -136,6 +139,7 @@ int ConfirmHdfsDbFile::confirmDbFileChange(
// rename the new file
errno = 0;
if (fFs.rename(tmp.c_str(), filename.c_str()) != 0)
{
int errNum = errno;
@ -169,6 +173,7 @@ int ConfirmHdfsDbFile::endDbFileChange(
if (backUpFileType.compare("rlc") == 0)
{
std::string rlc(filename + ".rlc");
if (fFs.exists(rlc.c_str()))
fFs.remove(rlc.c_str()); // TBD-okay to ignore failed removal?
@ -187,12 +192,14 @@ int ConfirmHdfsDbFile::endDbFileChange(
}
std::string orig(filename + ".orig");
if (success)
{
// remove the orig file
errno = 0;
if ((fFs.exists(orig.c_str())) &&
(fFs.remove(orig.c_str())) != 0)
(fFs.remove(orig.c_str())) != 0)
{
int errNum = errno;
std::ostringstream oss;
@ -209,9 +216,10 @@ int ConfirmHdfsDbFile::endDbFileChange(
if (fFs.exists(orig.c_str()))
{
errno = 0;
// Try to remove file only if it exists
if ((fFs.exists(filename.c_str())) &&
(fFs.remove(filename.c_str()) != 0))
(fFs.remove(filename.c_str()) != 0))
{
int errNum = errno;
std::ostringstream oss;
@ -224,6 +232,7 @@ int ConfirmHdfsDbFile::endDbFileChange(
}
errno = 0;
if (fFs.rename(orig.c_str(), filename.c_str()) != 0)
{
int errNum = errno;
@ -240,8 +249,9 @@ int ConfirmHdfsDbFile::endDbFileChange(
// remove the tmp file
std::string tmp(filename + ".tmp");
errno = 0;
if ((fFs.exists(tmp.c_str())) &&
(fFs.remove(tmp.c_str())) != 0)
(fFs.remove(tmp.c_str())) != 0)
{
int errNum = errno;
std::ostringstream oss;
@ -256,8 +266,9 @@ int ConfirmHdfsDbFile::endDbFileChange(
// remove the chunk shifting helper
std::string rlc(filename + ".rlc");
errno = 0;
if ((fFs.exists(rlc.c_str())) &&
(fFs.remove(rlc.c_str())) != 0)
(fFs.remove(rlc.c_str())) != 0)
{
int errNum = errno;
std::ostringstream oss;
@ -288,11 +299,11 @@ int ConfirmHdfsDbFile::confirmDbFileListFromMetaFile(
std::vector<uint16_t> dbRoots;
Config::getRootIdList( dbRoots );
for (unsigned m=0; m<dbRoots.size(); m++)
for (unsigned m = 0; m < dbRoots.size(); m++)
{
std::istringstream metaDataStream;
openMetaDataFile ( tableOID,
dbRoots[m], metaDataStream );
dbRoots[m], metaDataStream );
confirmDbFiles( metaDataStream );
}
@ -311,7 +322,7 @@ int ConfirmHdfsDbFile::confirmDbFileListFromMetaFile(
oss << "Error confirming changes to table " << tableOID <<
"; " << ex.what();
errMsg = oss.str();
rc = ERR_UNKNOWN;
rc = ERR_UNKNOWN;
}
return rc;
@ -359,9 +370,10 @@ void ConfirmHdfsDbFile::confirmColumnDbFile(const char* inBuf) const
// Read meta-data record
int numFields = sscanf(inBuf, "%s %u %u %u %u %u %d %s %u %d",
recType, &columnOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &lastLocalHwm,
&colTypeInt, colTypeName, &colWidth, &compressionType );
recType, &columnOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &lastLocalHwm,
&colTypeInt, colTypeName, &colWidth, &compressionType );
if (numFields < 9) // compressionType is optional
{
std::ostringstream oss;
@ -375,10 +387,11 @@ void ConfirmHdfsDbFile::confirmColumnDbFile(const char* inBuf) const
char dbFileName[FILE_NAME_SIZE];
FileOp dbFile(false);
int rc = dbFile.getFileName( columnOID,
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -396,8 +409,9 @@ void ConfirmHdfsDbFile::confirmColumnDbFile(const char* inBuf) const
// Confirm the changes to the DB file name
std::string errMsg;
rc = confirmDbFileChange( std::string("tmp"),
dbFileName,
errMsg );
dbFileName,
errMsg );
if (rc != NO_ERROR)
{
throw WeException( errMsg, rc );
@ -421,8 +435,9 @@ void ConfirmHdfsDbFile::confirmDctnryStoreDbFile(const char* inBuf) const
// Read meta-data record
int numFields = sscanf(inBuf, "%s %u %u %u %u %u %u %d",
recType, &dColumnOID, &dStoreOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &localHwm, &compressionType );
recType, &dColumnOID, &dStoreOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &localHwm, &compressionType );
if (numFields < 7) // compressionType optional
{
std::ostringstream oss;
@ -436,15 +451,16 @@ void ConfirmHdfsDbFile::confirmDctnryStoreDbFile(const char* inBuf) const
char dbFileName[FILE_NAME_SIZE];
FileOp dbFile(false);
int rc = dbFile.getFileName( dStoreOID,
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
if (rc != NO_ERROR)
{
WErrorCodes ec;
std::ostringstream oss;
oss<<"Error constructing dictionary store filename to confirm changes"<<
oss << "Error constructing dictionary store filename to confirm changes" <<
"; columnOID-" << dStoreOID <<
"; dbRoot-" << dbRootHwm <<
"; partNum-" << partNumHwm <<
@ -457,8 +473,9 @@ void ConfirmHdfsDbFile::confirmDctnryStoreDbFile(const char* inBuf) const
// Confirm the changes to the DB file name
std::string errMsg;
rc = confirmDbFileChange( std::string("tmp"),
dbFileName,
errMsg );
dbFileName,
errMsg );
if (rc != NO_ERROR)
{
throw WeException( errMsg, rc );
@ -480,14 +497,15 @@ int ConfirmHdfsDbFile::endDbFileListFromMetaFile(
std::vector<uint16_t> dbRoots;
Config::getRootIdList( dbRoots );
for (unsigned m=0; m<dbRoots.size(); m++)
for (unsigned m = 0; m < dbRoots.size(); m++)
{
std::istringstream metaDataStream;
try
{
std::istringstream metaDataStream;
openMetaDataFile ( tableOID,
dbRoots[m], metaDataStream );
dbRoots[m], metaDataStream );
endDbFiles( metaDataStream, success );
}
@ -519,7 +537,7 @@ int ConfirmHdfsDbFile::endDbFileListFromMetaFile(
oss << "Error deleting temp files for table " << tableOID <<
"; " << ex.what();
errMsg = oss.str();
rc = ERR_UNKNOWN;
rc = ERR_UNKNOWN;
}
else
{
@ -573,18 +591,20 @@ void ConfirmHdfsDbFile::endDbFiles(
{
errMsg += "; ";
}
errMsg += ex.what();
}
catch (std::exception& ex)
{
if (errMsg.size() == 0)
{
rc = ERR_UNKNOWN;
rc = ERR_UNKNOWN;
}
else
{
errMsg += "; ";
}
errMsg += ex.what();
}
}
@ -617,9 +637,10 @@ void ConfirmHdfsDbFile::endColumnDbFile(
// Read meta-data record
int numFields = sscanf(inBuf, "%s %u %u %u %u %u %d %s %u %d",
recType, &columnOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &lastLocalHwm,
&colTypeInt, colTypeName, &colWidth, &compressionType );
recType, &columnOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &lastLocalHwm,
&colTypeInt, colTypeName, &colWidth, &compressionType );
if (numFields < 9) // compressionType is optional
{
std::ostringstream oss;
@ -633,10 +654,11 @@ void ConfirmHdfsDbFile::endColumnDbFile(
char dbFileName[FILE_NAME_SIZE];
FileOp dbFile(false);
int rc = dbFile.getFileName( columnOID,
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -654,9 +676,10 @@ void ConfirmHdfsDbFile::endColumnDbFile(
// Confirm the changes to the DB file name
std::string errMsg;
rc = endDbFileChange( std::string("tmp"),
dbFileName,
success,
errMsg );
dbFileName,
success,
errMsg );
if (rc != NO_ERROR)
{
throw WeException( errMsg, rc );
@ -682,8 +705,9 @@ void ConfirmHdfsDbFile::endDctnryStoreDbFile(
// Read meta-data record
int numFields = sscanf(inBuf, "%s %u %u %u %u %u %u %d",
recType, &dColumnOID, &dStoreOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &localHwm, &compressionType );
recType, &dColumnOID, &dStoreOID,
&dbRootHwm, &partNumHwm, &segNumHwm, &localHwm, &compressionType );
if (numFields < 7) // compressionType optional
{
std::ostringstream oss;
@ -697,15 +721,16 @@ void ConfirmHdfsDbFile::endDctnryStoreDbFile(
char dbFileName[FILE_NAME_SIZE];
FileOp dbFile(false);
int rc = dbFile.getFileName( dStoreOID,
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
dbFileName,
dbRootHwm,
partNumHwm,
segNumHwm );
if (rc != NO_ERROR)
{
WErrorCodes ec;
std::ostringstream oss;
oss<<"Error constructing dictionary store filename to end changes"<<
oss << "Error constructing dictionary store filename to end changes" <<
"; columnOID-" << dStoreOID <<
"; dbRoot-" << dbRootHwm <<
"; partNum-" << partNumHwm <<
@ -718,9 +743,10 @@ void ConfirmHdfsDbFile::endDctnryStoreDbFile(
// Confirm the changes to the DB file name
std::string errMsg;
rc = endDbFileChange( std::string("tmp"),
dbFileName,
success,
errMsg );
dbFileName,
success,
errMsg );
if (rc != NO_ERROR)
{
throw WeException( errMsg, rc );
@ -733,8 +759,8 @@ void ConfirmHdfsDbFile::endDctnryStoreDbFile(
// DataStream argument.
//------------------------------------------------------------------------------
void ConfirmHdfsDbFile::openMetaDataFile(OID tableOID,
uint16_t dbRoot,
std::istringstream& metaDataStream)
uint16_t dbRoot,
std::istringstream& metaDataStream)
{
std::string bulkRollbackPath( Config::getDBRootByNum( dbRoot ) );
@ -758,9 +784,9 @@ void ConfirmHdfsDbFile::openMetaDataFile(OID tableOID,
boost::scoped_ptr<IDBDataFile> metaFile;
errno = 0;
metaFile.reset(idbdatafile::IDBDataFile::open(
idbdatafile::IDBPolicy::getType(fMetaFileName.c_str(),
idbdatafile::IDBPolicy::WRITEENG),
fMetaFileName.c_str(), "rb", 0) );
idbdatafile::IDBPolicy::getType(fMetaFileName.c_str(),
idbdatafile::IDBPolicy::WRITEENG),
fMetaFileName.c_str(), "rb", 0) );
if ( !metaFile )
{
@ -781,17 +807,20 @@ void ConfirmHdfsDbFile::openMetaDataFile(OID tableOID,
ssize_t readSofar = 0; // bytes read so far
ssize_t bytes = 0; // bytes read by one pread
char* p = buf.get();
for (int i = 0; i < 10 && readSofar < metaFileSize; i++)
{
errno = 0;
bytes = metaFile->pread( p+readSofar,
readSofar,
metaFileSize-readSofar);
if (bytes < 0)
break;
errno = 0;
bytes = metaFile->pread( p + readSofar,
readSofar,
metaFileSize - readSofar);
readSofar += bytes;
if (bytes < 0)
break;
readSofar += bytes;
}
if ( readSofar != metaFileSize )
{
int errRc = errno;
@ -810,6 +839,7 @@ void ConfirmHdfsDbFile::openMetaDataFile(OID tableOID,
// read data
metaDataStream.getline( inBuf, BUF_SIZE );
if (!RBMetaWriter::verifyVersion4(inBuf))
{
std::ostringstream oss;

View File

@ -60,8 +60,8 @@ public:
* @return Returns NO_ERROR if call is successful
*/
EXPORT int confirmDbFileChange( const std::string& backUpFileType,
const std::string& filename,
std::string& errMsg ) const;
const std::string& filename,
std::string& errMsg ) const;
/** @brief Finalize changes to the specified db file
*
@ -80,9 +80,9 @@ public:
* @return Returns NO_ERROR if call is successful
*/
EXPORT int endDbFileChange( const std::string& backUpFileType,
const std::string& filename,
bool success,
std::string& errMsg ) const;
const std::string& filename,
bool success,
std::string& errMsg ) const;
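
Taken together, confirmDbFileChange() and endDbFileChange() implement a swap-with-backup protocol over the ".tmp" and ".orig" suffixes. The fragment below is only a sketch of the confirm step, reusing the IDBFileSystem calls that appear in the implementation above; the file name is hypothetical:

    idbdatafile::IDBFileSystem& fs =
        idbdatafile::IDBFileSystem::getFs(idbdatafile::IDBDataFile::BUFFERED);
    std::string name("/dbroot1/000.dir/FILE000.cdf");   // made-up path
    std::string tmp(name + ".tmp"), orig(name + ".orig");
    if (fs.exists(tmp.c_str()))                         // nothing staged, nothing to confirm
    {
        if (fs.exists(orig.c_str()))
            fs.remove(orig.c_str());                    // drop any stale backup
        fs.rename(name.c_str(), orig.c_str());          // keep the current image as .orig
        fs.rename(tmp.c_str(), name.c_str());           // promote the staged image
    }

On failure, endDbFileChange() reverses the swap from the ".orig" copy; on success it removes the leftover ".orig", ".tmp", and ".rlc" files.
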
/** @brief Confirm changes to the db files modified for tableOID
*
@ -94,7 +94,7 @@ public:
* @return Returns NO_ERROR if call is successful
*/
EXPORT int confirmDbFileListFromMetaFile( OID tableOID,
std::string& errMsg );
std::string& errMsg );
/** @brief Finalize changes to the db files modified for tableOID
*
@ -112,13 +112,13 @@ public:
* @return Returns NO_ERROR if call is successful
*/
EXPORT int endDbFileListFromMetaFile( OID tableOID,
bool success,
std::string& errMsg );
bool success,
std::string& errMsg );
private:
void openMetaDataFile( OID tableOID,
uint16_t dbRoot,
std::istringstream& metaDataStream );
uint16_t dbRoot,
std::istringstream& metaDataStream );
void confirmDbFiles( std::istringstream& metaDataStream ) const;
void confirmColumnDbFile( const char* inBuf ) const;

View File

@ -35,7 +35,7 @@ using namespace std;
using namespace execplan;
namespace
{
const char DATE_TIME_FORMAT[] = "%04d-%02d-%02d %02d:%02d:%02d";
const char DATE_TIME_FORMAT[] = "%04d-%02d-%02d %02d:%02d:%02d";
/*******************************************************************************
* DESCRIPTION:
@ -55,9 +55,11 @@ int _doDir(char* pBuffer, int blen, unsigned int val)
if (!pBuffer)
{
rc = -1;
} else {
}
else
{
rc = snprintf(pBuffer, blen, "%03u.dir", val);
pBuffer[blen-1] = (char)0;
pBuffer[blen - 1] = (char)0;
}
return rc;
@ -81,9 +83,11 @@ int _doFile(char* pBuffer, int blen, unsigned char val)
if (!pBuffer)
{
rc = -1;
} else {
}
else
{
rc = snprintf(pBuffer, blen, "FILE%03d.cdf", val);
pBuffer[blen-1] = (char)0;
pBuffer[blen - 1] = (char)0;
}
return rc;
@ -124,21 +128,21 @@ struct Convertor::dmFilePathArgs_t
* time string
******************************************************************************/
/* static */
const std::string Convertor::getTimeStr()
const std::string Convertor::getTimeStr()
{
char buf[sizeof(DATE_TIME_FORMAT)+10] = {0};
char buf[sizeof(DATE_TIME_FORMAT) + 10] = {0};
time_t curTime = time(NULL);
struct tm pTime;
localtime_r(&curTime, &pTime);
localtime_r(&curTime, &pTime);
string timeStr;
snprintf(buf, sizeof(buf), DATE_TIME_FORMAT, pTime.tm_year + 1900,
pTime.tm_mon + 1, pTime.tm_mday,
pTime.tm_hour, pTime.tm_min, pTime.tm_sec);
pTime.tm_mon + 1, pTime.tm_mday,
pTime.tm_hour, pTime.tm_min, pTime.tm_sec);
timeStr = buf;
return timeStr;
return timeStr;
}
/*******************************************************************************
@ -187,6 +191,7 @@ long long Convertor::convertDecimalString(
// Determine the number of digits before and after the decimal point
char* posDecPt = (char*)memchr(field, '.', fieldLength);
if (posDecPt)
{
nDigitsBeforeDecPt = posDecPt - field;
@ -197,8 +202,9 @@ long long Convertor::convertDecimalString(
if (nDigitsAfterDecPt > scale)
{
char roundOffDigit = *(posDecPt + 1 + scale);
if ( (roundOffDigit > '4') &&
(roundOffDigit <='9') ) // round up
(roundOffDigit <= '9') ) // round up
{
roundUp = 1;
@ -207,12 +213,13 @@ long long Convertor::convertDecimalString(
// end up parsing "-0.00", which yields 0; meaning we lose the
// sign. So better (though maybe slower) to look for any lead-
// ing negative sign in the input string.
for (int k=0; k<fieldLength; k++)
for (int k = 0; k < fieldLength; k++)
{
if (!isspace(field[k]))
{
if (field[k] == '-')
roundUp = -1;
break;
}
}
@ -225,12 +232,13 @@ long long Convertor::convertDecimalString(
nDigitsAfterDecPt = 0;
}
// Strip out the decimal point by stringing together
// the digits before and after the decimal point.
char* data = (char*)alloca(nDigitsBeforeDecPt + scale + 1);
memcpy(data, field, nDigitsBeforeDecPt);
if (nDigitsAfterDecPt)
{
// Strip out the decimal point by stringing together
// the digits before and after the decimal point.
char* data = (char*)alloca(nDigitsBeforeDecPt + scale + 1);
memcpy(data, field, nDigitsBeforeDecPt);
if (nDigitsAfterDecPt)
{
if (scale > nDigitsAfterDecPt)
memcpy(data + nDigitsBeforeDecPt,
field + nDigitsBeforeDecPt + 1,
@ -278,10 +286,10 @@ long long Convertor::convertDecimalString(
******************************************************************************/
/* static */
int Convertor::oid2FileName(FID fid,
char* fullFileName,
char dbDirName[][MAX_DB_DIR_NAME_SIZE],
uint32_t partition,
uint16_t segment)
char* fullFileName,
char dbDirName[][MAX_DB_DIR_NAME_SIZE],
uint32_t partition,
uint16_t segment)
{
dmFilePathArgs_t args;
int rc;
@ -315,10 +323,10 @@ int Convertor::oid2FileName(FID fid,
args.FNrc = 0;
RETURN_ON_WE_ERROR(
(rc = dmOid2FPath(fid, partition, segment, &args)),
ERR_DM_CONVERT_OID);
(rc = dmOid2FPath(fid, partition, segment, &args)),
ERR_DM_CONVERT_OID);
sprintf(fullFileName, "%s/%s/%s/%s/%s/%s", args.pDirA,
args.pDirB, args.pDirC, args.pDirD, args.pDirE, args.pFName);
args.pDirB, args.pDirC, args.pDirD, args.pDirE, args.pFName);
strcpy(dbDirName[0], args.pDirA);
strcpy(dbDirName[1], args.pDirB);
@ -347,16 +355,20 @@ void Convertor::mapErrnoToString(int errNum, std::string& errString)
char errnoMsgBuf[1024];
#if STRERROR_R_CHAR_P
char* errnoMsg = strerror_r(errNum, errnoMsgBuf, sizeof(errnoMsgBuf));
if (errnoMsg)
errString = errnoMsg;
else
errString.clear();
#else
int errnoMsg = strerror_r(errNum, errnoMsgBuf, sizeof(errnoMsgBuf));
if (errnoMsg == 0)
errString = errnoMsgBuf;
else
errString.clear();
#endif
}
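
The #if above only selects between the GNU strerror_r(), which returns a char*, and the POSIX variant, which returns an int; callers see the same interface either way. A minimal usage sketch (ENOENT comes from <cerrno>):

    std::string errText;
    Convertor::mapErrnoToString(ENOENT, errText);   // e.g. "No such file or directory"
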
@ -371,7 +383,7 @@ void Convertor::mapErrnoToString(int errNum, std::string& errString)
******************************************************************************/
/* static */
void Convertor::convertColType(CalpontSystemCatalog::ColDataType dataType,
ColType& internalType, bool isToken)
ColType& internalType, bool isToken)
{
if (isToken)
{
@ -379,53 +391,63 @@ void Convertor::convertColType(CalpontSystemCatalog::ColDataType dataType,
return;
}
switch(dataType) {
switch (dataType)
{
// Map BIT and TINYINT to WR_BYTE
case CalpontSystemCatalog::BIT :
case CalpontSystemCatalog::TINYINT :
internalType = WriteEngine::WR_BYTE; break;
internalType = WriteEngine::WR_BYTE;
break;
// Map SMALLINT to WR_SHORT
case CalpontSystemCatalog::SMALLINT :
internalType = WriteEngine::WR_SHORT; break;
internalType = WriteEngine::WR_SHORT;
break;
// Map MEDINT, INT, and DATE to WR_INT
case CalpontSystemCatalog::MEDINT :
case CalpontSystemCatalog::INT :
case CalpontSystemCatalog::DATE :
internalType = WriteEngine::WR_INT; break;
internalType = WriteEngine::WR_INT;
break;
// Map FLOAT and UFLOAT to WR_FLOAT
case CalpontSystemCatalog::FLOAT :
case CalpontSystemCatalog::UFLOAT:
internalType = WriteEngine::WR_FLOAT; break;
internalType = WriteEngine::WR_FLOAT;
break;
// Map BIGINT and DATETIME to WR_LONGLONG
case CalpontSystemCatalog::BIGINT :
case CalpontSystemCatalog::DATETIME :
internalType = WriteEngine::WR_LONGLONG; break;
internalType = WriteEngine::WR_LONGLONG;
break;
// Map DOUBLE and UDOUBLE to WR_DOUBLE
case CalpontSystemCatalog::DOUBLE :
case CalpontSystemCatalog::UDOUBLE:
internalType = WriteEngine::WR_DOUBLE; break;
internalType = WriteEngine::WR_DOUBLE;
break;
// Map BLOB to WR_BLOB
case CalpontSystemCatalog::BLOB :
internalType = WriteEngine::WR_BLOB; break;
internalType = WriteEngine::WR_BLOB;
break;
// Map TEXT to WR_TEXT
case CalpontSystemCatalog::TEXT :
internalType = WriteEngine::WR_TEXT; break;
internalType = WriteEngine::WR_TEXT;
break;
// Map VARBINARY to WR_VARBINARY
case CalpontSystemCatalog::VARBINARY:
internalType = WriteEngine::WR_VARBINARY; break;
internalType = WriteEngine::WR_VARBINARY;
break;
// Map DECIMAL to applicable WR_CHAR
// We can't map them to their proper int size, since in this version
// of convertColType(), we don't know what that is. Hopefully
// this is never used for DECIMAL.
// this is never used for DECIMAL.
case CalpontSystemCatalog::DECIMAL :
case CalpontSystemCatalog::UDECIMAL:
@ -433,27 +455,33 @@ void Convertor::convertColType(CalpontSystemCatalog::ColDataType dataType,
case CalpontSystemCatalog::CHAR :
case CalpontSystemCatalog::VARCHAR :
case CalpontSystemCatalog::CLOB :
internalType = WriteEngine::WR_CHAR; break;
internalType = WriteEngine::WR_CHAR;
break;
// Map UTINYINT to WR_UBYTE
case CalpontSystemCatalog::UTINYINT:
internalType = WriteEngine::WR_UBYTE; break;
internalType = WriteEngine::WR_UBYTE;
break;
// Map USMALLINT to WR_USHORT
case CalpontSystemCatalog::USMALLINT:
internalType = WriteEngine::WR_USHORT; break;
internalType = WriteEngine::WR_USHORT;
break;
// Map UMEDINT and UINT to WR_UINT
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
internalType = WriteEngine::WR_UINT; break;
internalType = WriteEngine::WR_UINT;
break;
// Map UBIGINT to WR_ULONGLONG
case CalpontSystemCatalog::UBIGINT:
internalType = WriteEngine::WR_ULONGLONG; break;
internalType = WriteEngine::WR_ULONGLONG;
break;
default:
internalType = WriteEngine::WR_CHAR; break;
internalType = WriteEngine::WR_CHAR;
break;
}
}
@ -468,69 +496,84 @@ void Convertor::convertColType(CalpontSystemCatalog::ColDataType dataType,
* none
******************************************************************************/
/* static */
void Convertor::convertWEColType(ColType internalType,
CalpontSystemCatalog::ColDataType& dataType)
void Convertor::convertWEColType(ColType internalType,
CalpontSystemCatalog::ColDataType& dataType)
{
switch(internalType)
switch (internalType)
{
// Map BIT and TINYINT to WR_BYTE
case WriteEngine::WR_BYTE :
dataType = CalpontSystemCatalog::TINYINT; break;
dataType = CalpontSystemCatalog::TINYINT;
break;
// Map SMALLINT to WR_SHORT
case WriteEngine::WR_SHORT :
dataType = CalpontSystemCatalog::SMALLINT; break;
dataType = CalpontSystemCatalog::SMALLINT;
break;
// Map MEDINT, INT, and DATE to WR_INT
case WriteEngine::WR_INT :
dataType = CalpontSystemCatalog::INT; break;
dataType = CalpontSystemCatalog::INT;
break;
// Map FLOAT and UFLOAT to WR_FLOAT
case WriteEngine::WR_FLOAT:
dataType = CalpontSystemCatalog::FLOAT; break;
dataType = CalpontSystemCatalog::FLOAT;
break;
// Map BIGINT and DATETIME to WR_LONGLONG
case WriteEngine::WR_LONGLONG :
dataType = CalpontSystemCatalog::BIGINT; break;
dataType = CalpontSystemCatalog::BIGINT;
break;
// Map DOUBLE and UDOUBLE to WR_DOUBLE
case WriteEngine::WR_DOUBLE :
dataType = CalpontSystemCatalog::DOUBLE; break;
dataType = CalpontSystemCatalog::DOUBLE;
break;
// Map BLOB to WR_BLOB
case WriteEngine::WR_BLOB :
dataType = CalpontSystemCatalog::BLOB; break;
dataType = CalpontSystemCatalog::BLOB;
break;
// Map TEXT to WR_TEXT
case WriteEngine::WR_TEXT :
dataType = CalpontSystemCatalog::TEXT; break;
dataType = CalpontSystemCatalog::TEXT;
break;
// Map VARBINARY to WR_VARBINARY
case WriteEngine::WR_VARBINARY:
dataType = CalpontSystemCatalog::VARBINARY; break;
dataType = CalpontSystemCatalog::VARBINARY;
break;
// Map CHAR, VARCHAR, and CLOB to WR_CHAR
case WriteEngine::WR_CHAR :
dataType = CalpontSystemCatalog::CHAR; break;
dataType = CalpontSystemCatalog::CHAR;
break;
// Map UTINYINT to WR_UBYTE
case WriteEngine::WR_UBYTE:
dataType = CalpontSystemCatalog::UTINYINT; break;
dataType = CalpontSystemCatalog::UTINYINT;
break;
// Map USMALLINT to WR_USHORT
case WriteEngine::WR_USHORT:
dataType = CalpontSystemCatalog::USMALLINT; break;
dataType = CalpontSystemCatalog::USMALLINT;
break;
// Map UMEDINT and UINT to WR_UINT
case WriteEngine::WR_UINT:
dataType = CalpontSystemCatalog::UINT; break;
dataType = CalpontSystemCatalog::UINT;
break;
// Map UBIGINT to WR_ULONGLONG
case WriteEngine::WR_ULONGLONG:
dataType = CalpontSystemCatalog::UBIGINT; break;
dataType = CalpontSystemCatalog::UBIGINT;
break;
default:
dataType = CalpontSystemCatalog::CHAR; break;
dataType = CalpontSystemCatalog::CHAR;
break;
}
}
@ -547,7 +590,7 @@ void Convertor::convertWEColType(ColType internalType,
void Convertor::convertColType(ColStruct* curStruct)
{
CalpontSystemCatalog::ColDataType dataType // This will be updated later,
= CalpontSystemCatalog::CHAR; // CHAR used only for initialization.
= CalpontSystemCatalog::CHAR; // CHAR used only for initialization.
ColType* internalType = NULL;
bool bTokenFlag = false;
int* width = NULL;
@ -557,36 +600,43 @@ void Convertor::convertColType(ColStruct* curStruct)
bTokenFlag = curStruct->tokenFlag;
width = &(curStruct->colWidth);
switch(dataType) {
switch (dataType)
{
// Map BIT and TINYINT to WR_BYTE
case CalpontSystemCatalog::BIT :
case CalpontSystemCatalog::TINYINT :
*internalType = WriteEngine::WR_BYTE; break;
*internalType = WriteEngine::WR_BYTE;
break;
// Map SMALLINT to WR_SHORT
case CalpontSystemCatalog::SMALLINT :
*internalType = WriteEngine::WR_SHORT; break;
*internalType = WriteEngine::WR_SHORT;
break;
// Map MEDINT, INT, and DATE to WR_INT
case CalpontSystemCatalog::MEDINT :
case CalpontSystemCatalog::INT :
case CalpontSystemCatalog::DATE :
*internalType = WriteEngine::WR_INT; break;
*internalType = WriteEngine::WR_INT;
break;
// Map FLOAT and UFLOAT to WR_FLOAT
case CalpontSystemCatalog::FLOAT :
case CalpontSystemCatalog::UFLOAT :
*internalType = WriteEngine::WR_FLOAT; break;
*internalType = WriteEngine::WR_FLOAT;
break;
// Map BIGINT and DATETIME to WR_LONGLONG
case CalpontSystemCatalog::BIGINT :
case CalpontSystemCatalog::DATETIME :
*internalType = WriteEngine::WR_LONGLONG; break;
*internalType = WriteEngine::WR_LONGLONG;
break;
// Map DOUBLE and UDOUBLE to WR_DOUBLE
case CalpontSystemCatalog::DOUBLE :
case CalpontSystemCatalog::UDOUBLE :
*internalType = WriteEngine::WR_DOUBLE; break;
*internalType = WriteEngine::WR_DOUBLE;
break;
// Map DECIMAL to applicable integer type
case CalpontSystemCatalog::DECIMAL :
@ -594,51 +644,72 @@ void Convertor::convertColType(ColStruct* curStruct)
{
switch (*width)
{
case 1 : *internalType = WriteEngine::WR_BYTE; break;
case 2 : *internalType = WriteEngine::WR_SHORT; break;
case 4 : *internalType = WriteEngine::WR_INT; break;
default: *internalType = WriteEngine::WR_LONGLONG; break;
case 1 :
*internalType = WriteEngine::WR_BYTE;
break;
case 2 :
*internalType = WriteEngine::WR_SHORT;
break;
case 4 :
*internalType = WriteEngine::WR_INT;
break;
default:
*internalType = WriteEngine::WR_LONGLONG;
break;
}
break;
}
// Map BLOB to WR_BLOB
case CalpontSystemCatalog::BLOB :
*internalType = WriteEngine::WR_BLOB; break;
*internalType = WriteEngine::WR_BLOB;
break;
// Map TEXT to WR_TEXT
case CalpontSystemCatalog::TEXT :
*internalType = WriteEngine::WR_TEXT; break;
*internalType = WriteEngine::WR_TEXT;
break;
// Map VARBINARY to WR_VARBINARY
case CalpontSystemCatalog::VARBINARY:
*internalType = WriteEngine::WR_VARBINARY; break;
*internalType = WriteEngine::WR_VARBINARY;
break;
// Map CHAR, VARCHAR, and CLOB to WR_CHAR
case CalpontSystemCatalog::CHAR :
case CalpontSystemCatalog::VARCHAR :
case CalpontSystemCatalog::CLOB :
*internalType = WriteEngine::WR_CHAR; break;
*internalType = WriteEngine::WR_CHAR;
break;
// Map UTINYINT to WR_UBYTE
case CalpontSystemCatalog::UTINYINT:
*internalType = WriteEngine::WR_UBYTE; break;
*internalType = WriteEngine::WR_UBYTE;
break;
// Map USMALLINT to WR_USHORT
case CalpontSystemCatalog::USMALLINT:
*internalType = WriteEngine::WR_USHORT; break;
*internalType = WriteEngine::WR_USHORT;
break;
// Map UMEDINT and UINT to WR_UINT
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
*internalType = WriteEngine::WR_UINT; break;
*internalType = WriteEngine::WR_UINT;
break;
// Map UBIGINT to WR_ULONGLONG
case CalpontSystemCatalog::UBIGINT:
*internalType = WriteEngine::WR_ULONGLONG; break;
*internalType = WriteEngine::WR_ULONGLONG;
break;
default:
*internalType = WriteEngine::WR_CHAR; break;
*internalType = WriteEngine::WR_CHAR;
break;
}
if (bTokenFlag) // token overwrite any other types
@ -669,27 +740,40 @@ int Convertor::getCorrectRowWidth(CalpontSystemCatalog::ColDataType dataType, in
{
int offset, newWidth = 4;
switch(dataType) {
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::UTINYINT:
newWidth = 1; break;
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::USMALLINT:
newWidth = 2; break;
switch (dataType)
{
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::UTINYINT:
newWidth = 1;
break;
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::USMALLINT:
newWidth = 2;
break;
case CalpontSystemCatalog::MEDINT:
case CalpontSystemCatalog::INT:
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
newWidth = 4; break;
newWidth = 4;
break;
case CalpontSystemCatalog::BIGINT:
case CalpontSystemCatalog::UBIGINT:
newWidth = 8; break;
newWidth = 8;
break;
case CalpontSystemCatalog::FLOAT:
case CalpontSystemCatalog::UFLOAT:
newWidth = 4; break;
newWidth = 4;
break;
case CalpontSystemCatalog::DOUBLE:
case CalpontSystemCatalog::UDOUBLE:
newWidth = 8; break;
newWidth = 8;
break;
case CalpontSystemCatalog::DECIMAL:
case CalpontSystemCatalog::UDECIMAL:
if (width == 1)
@ -700,24 +784,31 @@ int Convertor::getCorrectRowWidth(CalpontSystemCatalog::ColDataType dataType, in
newWidth = 4;
else
newWidth = 8;
break;
case CalpontSystemCatalog::DATE:
newWidth = 4; break;
case CalpontSystemCatalog::DATE:
newWidth = 4;
break;
case CalpontSystemCatalog::DATETIME:
newWidth = 8; break;
newWidth = 8;
break;
case CalpontSystemCatalog::CHAR:
case CalpontSystemCatalog::VARCHAR:
case CalpontSystemCatalog::VARBINARY: // treat same as varchar for now
default:
offset = (dataType == CalpontSystemCatalog::VARCHAR)? -1 : 0;
offset = (dataType == CalpontSystemCatalog::VARCHAR) ? -1 : 0;
newWidth = 1;
if (width == (2 + offset))
newWidth = 2;
else if (width >= (3 + offset) && width <= (4 + offset))
newWidth = 4;
else if (width >= (5 + offset))
newWidth = 8;
break;
}
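
Reading the switch above gives a feel for the mapping (derived from the code, not from a test run): CHAR(3) lands on a 4-byte row, while VARCHAR gets a one-character discount from the -1 offset, so VARCHAR(4) already needs the 8-byte width:

    int w1 = Convertor::getCorrectRowWidth(CalpontSystemCatalog::CHAR, 3);    // expect 4
    int w2 = Convertor::getCorrectRowWidth(CalpontSystemCatalog::VARCHAR, 4); // expect 8
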
@ -766,67 +857,67 @@ int Convertor::getCorrectRowWidth(CalpontSystemCatalog::ColDataType dataType, in
******************************************************************************/
/*static*/
int Convertor::dmOid2FPath(uint32_t oid, uint32_t partition, uint32_t segment,
dmFilePathArgs_t* pArgs)
dmFilePathArgs_t* pArgs)
{
pArgs->Arc = _doDir(
pArgs->pDirA,
pArgs->ALen,
(unsigned int)oid>>24);
pArgs->pDirA,
pArgs->ALen,
(unsigned int)oid >> 24);
pArgs->Brc = _doDir(
pArgs->pDirB,
pArgs->BLen,
(unsigned int)(oid&0x00ff0000)>>16);
pArgs->pDirB,
pArgs->BLen,
(unsigned int)(oid & 0x00ff0000) >> 16);
pArgs->Crc = _doDir(
pArgs->pDirC,
pArgs->CLen,
(unsigned int)(oid&0x0000ff00)>>8);
pArgs->pDirC,
pArgs->CLen,
(unsigned int)(oid & 0x0000ff00) >> 8);
// include partition and seg num in the file path if they are present
if (pArgs->DLen > 0)
{
pArgs->Drc = _doDir(
pArgs->pDirD,
pArgs->DLen,
(unsigned int)(oid&0x000000ff));
pArgs->pDirD,
pArgs->DLen,
(unsigned int)(oid & 0x000000ff));
pArgs->Erc = _doDir(
pArgs->pDirE,
pArgs->ELen,
partition);
pArgs->pDirE,
pArgs->ELen,
partition);
pArgs->FNrc = _doFile(
pArgs->pFName,
pArgs->FNLen,
segment);
pArgs->pFName,
pArgs->FNLen,
segment);
if ( (pArgs->Drc < 0) ||
(pArgs->Erc < 0) )
(pArgs->Erc < 0) )
return -1;
if ( (pArgs->Drc >= pArgs->ALen) ||
(pArgs->Erc >= pArgs->ALen) )
(pArgs->Erc >= pArgs->ALen) )
return -1;
}
else
{
pArgs->FNrc = _doFile(
pArgs->pFName,
pArgs->FNLen,
(unsigned int)(oid&0x000000ff));
pArgs->pFName,
pArgs->FNLen,
(unsigned int)(oid & 0x000000ff));
}
if ( (pArgs->Arc < 0) ||
(pArgs->Brc < 0) ||
(pArgs->Crc < 0) ||
(pArgs->FNrc < 0) )
(pArgs->Brc < 0) ||
(pArgs->Crc < 0) ||
(pArgs->FNrc < 0) )
return -1;
if ( (pArgs->Arc >= pArgs->ALen) ||
(pArgs->Brc >= pArgs->BLen) ||
(pArgs->Crc >= pArgs->CLen) ||
(pArgs->FNrc >= pArgs->FNLen) )
(pArgs->Brc >= pArgs->BLen) ||
(pArgs->Crc >= pArgs->CLen) ||
(pArgs->FNrc >= pArgs->FNLen) )
return -1;
else
return 0;
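
A worked example of the byte slicing above, assuming the "%03u.dir" / "FILE%03d.cdf" formats produced by _doDir and _doFile: OID 3001 is 0x00000BB9, so the four OID-derived directories are 000.dir, 000.dir, 011.dir and 185.dir, and with partition 0 and segment 2 the assembled name should read 000.dir/000.dir/011.dir/185.dir/000.dir/FILE002.cdf:

    char fileName[FILE_NAME_SIZE];
    char dbDir[6][MAX_DB_DIR_NAME_SIZE];   // 6 directory levels assumed for this sketch
    Convertor::oid2FileName(3001, fileName, dbDir, 0, 2);
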

View File

@ -27,7 +27,7 @@
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <unistd.h>
#include <string>
#include <iostream>
@ -47,97 +47,97 @@ namespace WriteEngine
{
/** Class Convertor */
class Convertor
class Convertor
{
public:
/**
* @brief Default Constructor
*/
Convertor(){}
/**
* @brief Default Constructor
*/
Convertor() {}
/**
* @brief Destructor
*/
~Convertor(){}
/**
* @brief Destructor
*/
~Convertor() {}
/**
* @brief Get date/time string based on current date and time
*/
/**
* @brief Get date/time string based on current date and time
*/
EXPORT static const std::string getTimeStr();
/**
* @brief Convert specified integer value to a string
*
* @param val Integer value to be converted to a string
*/
/**
* @brief Convert specified integer value to a string
*
* @param val Integer value to be converted to a string
*/
EXPORT static const std::string int2Str(int val);
/**
* @brief Convert an oid to a full file name (with partition and segment
* being included in the filename). This is used for all column and
* dictionary store db files. If dealing with a version buffer file,
* a partition and segment number of 0 should be used.
*/
/**
* @brief Convert an oid to a full file name (with partition and segment
* being included in the filename). This is used for all column and
* dictionary store db files. If dealing with a version buffer file,
* a partition and segment number of 0 should be used.
*/
EXPORT static int oid2FileName(FID fid, char* fullFileName,
char dbDirName[][MAX_DB_DIR_NAME_SIZE],
uint32_t partition, uint16_t segment);
char dbDirName[][MAX_DB_DIR_NAME_SIZE],
uint32_t partition, uint16_t segment);
/**
* @brief Convert specified errno to associated error msg string
*
* @param errNum System errno to be converted.
* @param errString Error msg string associated with the specified errno.
*/
/**
* @brief Convert specified errno to associated error msg string
*
* @param errNum System errno to be converted.
* @param errString Error msg string associated with the specified errno.
*/
EXPORT static void mapErrnoToString(int errNum, std::string& errString);
/**
* @brief Convert specified ColDataType to internal storage type (ColType)
*
* @param dataType Interface data-type
* @param internalType Internal data-type used for storing
*/
/**
* @brief Convert specified ColDataType to internal storage type (ColType)
*
* @param dataType Interface data-type
* @param internalType Internal data-type used for storing
*/
//BUG931
EXPORT static void convertColType(execplan::CalpontSystemCatalog::ColDataType dataType,
ColType& internalType, bool isToken=false);
/**
* @brief Convert specified internal storage type (ColType) to
* ColDataType
*
* @param internalType Internal data-type used for storing
* @param dataType Interface data-type
*/
EXPORT static void convertWEColType(ColType internalType,
execplan::CalpontSystemCatalog::ColDataType& dataType);
ColType& internalType, bool isToken = false);
/**
* @brief Convert specified internal storage type (ColType) to
* ColDataType
*
* @param internalType Internal data-type used for storing
* @param dataType Interface data-type
*/
EXPORT static void convertWEColType(ColType internalType,
execplan::CalpontSystemCatalog::ColDataType& dataType);
/**
* @brief Convert interface column type to an internal column type.
* curStruct is interpreted as a ColStruct.
*/
/**
* @brief Convert interface column type to an internal column type.
* curStruct is interpreted as a ColStruct.
*/
EXPORT static void convertColType(ColStruct* curStruct);
/*
* @brief Get the correct width for a row
*/
/*
* @brief Get the correct width for a row
*/
EXPORT static int getCorrectRowWidth( execplan::CalpontSystemCatalog::ColDataType dataType, int width );
/*
* @brief Convert a Decimal string to its equivalent integer value.
* errno can be checked upon return to see if input value was
* out of range (ERANGE).
*
* field decimal string to be converted
* fieldLength length of "field" in bytes
* scale decimal scale to be applied to value
*/
/*
* @brief Convert a Decimal string to its equivalent integer value.
* errno can be checked upon return to see if input value was
* out of range (ERANGE).
*
* field decimal string to be converted
* fieldLength length of "field" in bytes
* scale decimal scale to be applied to value
*/
EXPORT static long long convertDecimalString ( const char* field,
int fieldLength,
int scale );
int fieldLength,
int scale );
private:
struct dmFilePathArgs_t;
static int dmOid2FPath(uint32_t oid, uint32_t partition, uint32_t segment,
dmFilePathArgs_t* pArgs);
struct dmFilePathArgs_t;
static int dmOid2FPath(uint32_t oid, uint32_t partition, uint32_t segment,
dmFilePathArgs_t* pArgs);
};
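
A small worked example of the rounding behaviour documented for convertDecimalString() above (derived from the implementation, not from a test run): with scale 2 the digits "12345" are kept and the trailing '6' forces a round-up.

    // "123.456" is 7 characters long; with scale 2 this should yield 12346
    long long v = Convertor::convertDecimalString("123.456", 7, 2);
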

View File

@ -50,7 +50,7 @@ DbFileOp::~DbFileOp()
{}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* flush the cache
* PARAMETERS:
* none
@ -61,11 +61,12 @@ int DbFileOp::flushCache()
{
BlockBuffer* curBuf;
if( !Cache::getUseCache() )
if ( !Cache::getUseCache() )
return NO_ERROR;
for( CacheMapIt it = Cache::m_writeList->begin();
it != Cache::m_writeList->end(); it++ ) {
for ( CacheMapIt it = Cache::m_writeList->begin();
it != Cache::m_writeList->end(); it++ )
{
curBuf = it->second;
RETURN_ON_ERROR( writeDBFile( (*curBuf).cb.file.pFile,
(*curBuf).block.data,
@ -78,7 +79,7 @@ int DbFileOp::flushCache()
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* get an entry within a sub block
* NOTE: the difference with readSubBlockEntry is that
* getSubBlockEntry only works for buffer while
@ -93,17 +94,17 @@ int DbFileOp::flushCache()
* none
***********************************************************/
void DbFileOp::getSubBlockEntry( unsigned char* blockBuf,
const int sbid, const int entryNo,
const int sbid, const int entryNo,
const int width, void* pStruct )
{
unsigned char* pBlock;
pBlock = blockBuf + BYTE_PER_SUBBLOCK * sbid + entryNo *MAX_COLUMN_BOUNDARY;
pBlock = blockBuf + BYTE_PER_SUBBLOCK * sbid + entryNo * MAX_COLUMN_BOUNDARY;
memcpy( pStruct, pBlock, width );
}
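
The pointer arithmetic above locates an entry at sbid * BYTE_PER_SUBBLOCK + entryNo * MAX_COLUMN_BOUNDARY bytes into the block. As a purely illustrative calculation, with the commonly used values of 256-byte sub-blocks and 8-byte entries (assumed here, not taken from this change), entry 3 of sub-block 2 would start at 2 * 256 + 3 * 8 = 536 bytes into the block buffer.
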
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Read a block from a file at specified location
* PARAMETERS:
* pFile - file handle
@ -120,11 +121,13 @@ int DbFileOp::readDBFile( IDBDataFile* pFile,
{
long long fboOffset = 0;
if( !isFbo ) {
if ( !isFbo )
{
RETURN_ON_ERROR( setFileOffsetBlock( pFile, lbid ) );
}
else {
fboOffset = (lbid)*(long)BYTE_PER_BLOCK;
else
{
fboOffset = (lbid) * (long)BYTE_PER_BLOCK;
RETURN_ON_ERROR( setFileOffset( pFile, fboOffset ) );
}
@ -136,31 +139,33 @@ int DbFileOp::readDBFile( IDBDataFile* pFile,
const uint64_t lbid,
const bool isFbo )
{
block->dirty = false;
block->no = lbid;
block->dirty = false;
block->no = lbid;
Stats::incIoBlockRead();
Stats::incIoBlockRead();
return readDBFile( pFile, block->data, lbid, isFbo );
return readDBFile( pFile, block->data, lbid, isFbo );
}
int DbFileOp::readDBFile( CommBlock& cb,
unsigned char* readBuf,
const uint64_t lbid )
{
const uint64_t lbid )
{
CacheKey key;
if( Cache::getUseCache() )
if ( Cache::getUseCache() )
{
if( Cache::cacheKeyExist( cb.file.oid, lbid ) ) {
if ( Cache::cacheKeyExist( cb.file.oid, lbid ) )
{
key = Cache::getCacheKey( cb.file.oid, lbid );
RETURN_ON_ERROR( Cache::loadCacheBlock( key, readBuf ) );
return NO_ERROR;
}
}
RETURN_ON_ERROR( readDBFile( cb.file.pFile, readBuf, lbid ) );
if( Cache::getUseCache() )
RETURN_ON_ERROR( readDBFile( cb.file.pFile, readBuf, lbid ) );
if ( Cache::getUseCache() )
{
int fbo = lbid;
@ -168,21 +173,26 @@ int DbFileOp::readDBFile( CommBlock& cb,
uint32_t partition;
uint16_t segment;
RETURN_ON_ERROR( BRMWrapper::getInstance()->getFboOffset(
lbid, dbRoot, partition, segment, fbo ) );
if( Cache::getListSize( FREE_LIST ) == 0 ) {
if ( isDebug( DEBUG_1 ) ) {
lbid, dbRoot, partition, segment, fbo ) );
if ( Cache::getListSize( FREE_LIST ) == 0 )
{
if ( isDebug( DEBUG_1 ) )
{
printf( "\nBefore flushing cache " );
Cache::printCacheList();
}
// flush cache to give up more space
RETURN_ON_ERROR( flushCache() );
if ( isDebug( DEBUG_1 ) ) {
if ( isDebug( DEBUG_1 ) )
{
printf( "\nAfter flushing cache " );
Cache::printCacheList();
}
}
RETURN_ON_ERROR( Cache::insertLRUList( cb, lbid, fbo, readBuf ) );
}
@ -208,8 +218,8 @@ int DbFileOp::readDBFile( CommBlock& cb,
* other number if something wrong
***********************************************************/
const int DbFileOp::readSubBlockEntry( IDBDataFile* pFile, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
void* pStruct )
{
RETURN_ON_ERROR( readDBFile( pFile, block->data, lbid ) );
@ -219,9 +229,9 @@ const int DbFileOp::readSubBlockEntry( IDBDataFile* pFile, DataBlock* block,
}
const int DbFileOp::readSubBlockEntry( CommBlock& cb, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
const int DbFileOp::readSubBlockEntry( CommBlock& cb, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
void* pStruct )
{
RETURN_ON_ERROR( readDBFile( cb, block->data, lbid ) );
@ -232,7 +242,7 @@ const int DbFileOp::readSubBlockEntry( CommBlock& cb, DataBlock* block,
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Set an entry within a sub block
* NOTE: the difference with writeSubBlockEntry is that
* setSubBlockEntry only works for buffer while
@ -246,18 +256,18 @@ const int DbFileOp::readSubBlockEntry( CommBlock& cb, DataBlock* block,
* RETURN:
* none
***********************************************************/
void DbFileOp::setSubBlockEntry( unsigned char* blockBuf, const int sbid,
const int entryNo, const int width,
void DbFileOp::setSubBlockEntry( unsigned char* blockBuf, const int sbid,
const int entryNo, const int width,
const void* pStruct )
{
unsigned char* pBlock;
pBlock = blockBuf + BYTE_PER_SUBBLOCK * sbid + entryNo *MAX_COLUMN_BOUNDARY;
pBlock = blockBuf + BYTE_PER_SUBBLOCK * sbid + entryNo * MAX_COLUMN_BOUNDARY;
memcpy( pBlock, pStruct, width );
}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Write a number of blocks to the file at specified location
* PARAMETERS:
* pFile - file handle
@ -268,48 +278,52 @@ void DbFileOp::setSubBlockEntry( unsigned char* blockBuf, const int sbid,
* NO_ERROR if success
* other number if something wrong
***********************************************************/
int DbFileOp::writeDBFile( CommBlock& cb, const unsigned char* writeBuf,
const uint64_t lbid, const int numOfBlock )
{
int DbFileOp::writeDBFile( CommBlock& cb, const unsigned char* writeBuf,
const uint64_t lbid, const int numOfBlock )
{
CacheKey key;
int ret;
if( Cache::getUseCache() )
if ( Cache::getUseCache() )
{
if( Cache::cacheKeyExist( cb.file.oid, lbid ) ) {
if ( Cache::cacheKeyExist( cb.file.oid, lbid ) )
{
key = Cache::getCacheKey( cb.file.oid, lbid );
RETURN_ON_ERROR( Cache::modifyCacheBlock( key, writeBuf ) );
return NO_ERROR;
}
}
if (BRMWrapper::getUseVb())
{
RETURN_ON_ERROR( writeVB( cb.file.pFile, cb.file.oid, lbid ) );
}
if (BRMWrapper::getUseVb())
{
RETURN_ON_ERROR( writeVB( cb.file.pFile, cb.file.oid, lbid ) );
}
ret = writeDBFile( cb.file.pFile, writeBuf, lbid, numOfBlock );
if (BRMWrapper::getUseVb())
{
LBIDRange_v ranges;
LBIDRange range;
range.start = lbid;
range.size = 1;
ranges.push_back(range);
BRMWrapper::getInstance()->writeVBEnd(getTransId(), ranges);
}
if (BRMWrapper::getUseVb())
{
LBIDRange_v ranges;
LBIDRange range;
range.start = lbid;
range.size = 1;
ranges.push_back(range);
BRMWrapper::getInstance()->writeVBEnd(getTransId(), ranges);
}
return ret;
}
int DbFileOp::writeDBFileNoVBCache(CommBlock & cb,
const unsigned char * writeBuf,
int DbFileOp::writeDBFileNoVBCache(CommBlock& cb,
const unsigned char* writeBuf,
const int fbo,
const int numOfBlock)
{
return writeDBFileNoVBCache( cb.file.pFile, writeBuf, fbo, numOfBlock );
return writeDBFileNoVBCache( cb.file.pFile, writeBuf, fbo, numOfBlock );
}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Core function for writing data w/o using VB cache
* (bulk load dictionary store inserts)
***********************************************************/
@ -324,7 +338,8 @@ int DbFileOp::writeDBFileNoVBCache( IDBDataFile* pFile,
Stats::startParseEvent(WE_STATS_WRITE_DCT);
#endif
for( int i = 0; i < numOfBlock; i++ ) {
for ( int i = 0; i < numOfBlock; i++ )
{
Stats::incIoBlockWrite();
RETURN_ON_ERROR( writeFile( pFile, writeBuf, BYTE_PER_BLOCK ) );
}
@ -337,7 +352,7 @@ int DbFileOp::writeDBFileNoVBCache( IDBDataFile* pFile,
}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Core function for writing data using VB cache
***********************************************************/
int DbFileOp::writeDBFile( IDBDataFile* pFile, const unsigned char* writeBuf,
@ -345,7 +360,8 @@ int DbFileOp::writeDBFile( IDBDataFile* pFile, const unsigned char* writeBuf,
{
RETURN_ON_ERROR( setFileOffsetBlock( pFile, lbid ) );
for( int i = 0; i < numOfBlock; i++ ) {
for ( int i = 0; i < numOfBlock; i++ )
{
Stats::incIoBlockWrite();
RETURN_ON_ERROR( writeFile( pFile, writeBuf, BYTE_PER_BLOCK ) );
}
@ -360,10 +376,11 @@ int DbFileOp::writeDBFileFbo(IDBDataFile* pFile, const unsigned char* writeBuf,
{
long long fboOffset = 0;
fboOffset = (fbo)*(long)BYTE_PER_BLOCK;
fboOffset = (fbo) * (long)BYTE_PER_BLOCK;
RETURN_ON_ERROR( setFileOffset( pFile, fboOffset ) );
for( int i = 0; i < numOfBlock; i++ ) {
for ( int i = 0; i < numOfBlock; i++ )
{
Stats::incIoBlockWrite();
RETURN_ON_ERROR( writeFile( pFile, writeBuf, BYTE_PER_BLOCK ) );
}
@ -372,7 +389,7 @@ int DbFileOp::writeDBFileFbo(IDBDataFile* pFile, const unsigned char* writeBuf,
}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Write an entry within a sub block to a file
* NOTE: the difference with getSubBlockEntry is that
* setSubBlockEntry only works for buffer while
@ -390,8 +407,8 @@ int DbFileOp::writeDBFileFbo(IDBDataFile* pFile, const unsigned char* writeBuf,
* other number if something wrong
***********************************************************/
const int DbFileOp::writeSubBlockEntry( IDBDataFile* pFile, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
void* pStruct )
{
setSubBlockEntry( block->data, sbid, entryNo, width, pStruct );
@ -400,9 +417,9 @@ const int DbFileOp::writeSubBlockEntry( IDBDataFile* pFile, DataBlock* block,
return writeDBFile( pFile, block->data, lbid );
}
const int DbFileOp::writeSubBlockEntry( CommBlock& cb, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
const int DbFileOp::writeSubBlockEntry( CommBlock& cb, DataBlock* block,
const uint64_t lbid, const int sbid,
const int entryNo, const int width,
void* pStruct )
{
setSubBlockEntry( block->data, sbid, entryNo, width, pStruct );
@ -412,7 +429,7 @@ const int DbFileOp::writeSubBlockEntry( CommBlock& cb, DataBlock* block,
}
/***********************************************************
* DESCRIPTION:
* DESCRIPTION:
* Write to version buffer
* PARAMETERS:
* oid - file oid
@ -423,42 +440,42 @@ const int DbFileOp::writeSubBlockEntry( CommBlock& cb, DataBlock* block,
***********************************************************/
const int DbFileOp::writeVB( IDBDataFile* pFile, const OID oid, const uint64_t lbid )
{
if( !BRMWrapper::getUseVb() )
if ( !BRMWrapper::getUseVb() )
return NO_ERROR;
int rc;
TxnID transId = getTransId();
TxnID transId = getTransId();
if (transId !=((TxnID)INVALID_NUM))
{
rc= BRMWrapper::getInstance()->writeVB( pFile,
(const VER_t)transId,
oid, lbid, this );
if (transId != ((TxnID)INVALID_NUM))
{
rc = BRMWrapper::getInstance()->writeVB( pFile,
(const VER_t)transId,
oid, lbid, this );
//@Bug 4671. The error is already logged by worker node.
/* if (rc != NO_ERROR)
{
char msg[2048];
snprintf(msg, 2048,
"we_dbfileop->BRMWrapper::getInstance()->writeVB "
"transId %i oid %i lbid "
#if __LP64__
"%lu"
#else
"%llu"
#endif
" Error Code %i", transId, oid, lbid, rc);
puts(msg);
{
logging::MessageLog ml(logging::LoggingID(19));
logging::Message m;
logging::Message::Args args;
args.add(msg);
m.format(args);
ml.logCriticalMessage(m);
}
return rc;
} */
return rc;
/* if (rc != NO_ERROR)
{
char msg[2048];
snprintf(msg, 2048,
"we_dbfileop->BRMWrapper::getInstance()->writeVB "
"transId %i oid %i lbid "
#if __LP64__
"%lu"
#else
"%llu"
#endif
" Error Code %i", transId, oid, lbid, rc);
puts(msg);
{
logging::MessageLog ml(logging::LoggingID(19));
logging::Message m;
logging::Message::Args args;
args.add(msg);
m.format(args);
ml.logCriticalMessage(m);
}
return rc;
} */
return rc;
}
return NO_ERROR;
@ -469,12 +486,14 @@ int DbFileOp::readDbBlocks(IDBDataFile* pFile,
uint64_t fbo,
size_t n)
{
if (m_chunkManager) {
if (m_chunkManager)
{
return m_chunkManager->readBlocks(pFile, readBuf, fbo, n);
}
}
if (setFileOffset(pFile, fbo*BYTE_PER_BLOCK, SEEK_SET) != NO_ERROR)
if (setFileOffset(pFile, fbo * BYTE_PER_BLOCK, SEEK_SET) != NO_ERROR)
return -1;
return pFile->read(readBuf, BYTE_PER_BLOCK * n) / BYTE_PER_BLOCK;
}
@ -483,7 +502,7 @@ int DbFileOp::restoreBlock(IDBDataFile* pFile, const unsigned char* writeBuf, ui
if (m_chunkManager)
return m_chunkManager->restoreBlock(pFile, writeBuf, fbo);
if (setFileOffset(pFile, fbo*BYTE_PER_BLOCK, SEEK_SET) != NO_ERROR)
if (setFileOffset(pFile, fbo * BYTE_PER_BLOCK, SEEK_SET) != NO_ERROR)
return -1;
return pFile->write(writeBuf, BYTE_PER_BLOCK);

View File

@ -46,40 +46,42 @@ class ChunkManager;
class DbFileOp : public FileOp
{
public:
/**
* @brief Constructor
*/
/**
* @brief Constructor
*/
EXPORT DbFileOp();
/**
* @brief Default Destructor
*/
/**
* @brief Default Destructor
*/
EXPORT virtual ~DbFileOp();
EXPORT virtual int flushCache();
/**
* @brief Get an entry within a subblock
*/
/**
* @brief Get an entry within a subblock
*/
EXPORT void getSubBlockEntry( unsigned char* blockBuf,
const int sbid,
const int entryNo,
const int width,
void* pStruct ) ;
/**
* @brief Get an entry within a subblock using block information
*/
/**
* @brief Get an entry within a subblock using block information
*/
void getSubBlockEntry( DataBlock* block,
const int sbid,
const int entryNo,
const int width,
void* pStruct )
{ getSubBlockEntry( block->data, sbid, entryNo, width, pStruct );}
{
getSubBlockEntry( block->data, sbid, entryNo, width, pStruct );
}
/**
* @brief Read DB file to a buffer
*/
/**
* @brief Read DB file to a buffer
*/
EXPORT virtual int readDBFile( IDBDataFile* pFile,
unsigned char* readBuf,
const uint64_t lbid,
@ -95,12 +97,14 @@ public:
int readDBFile( CommBlock& cb,
DataBlock* block,
const uint64_t lbid )
{ return readDBFile( cb, block->data, lbid );}
{
return readDBFile( cb, block->data, lbid );
}
/**
* @brief Get an entry within a subblock and also populate block buffer
*
*/
/**
* @brief Get an entry within a subblock and also populate block buffer
*
*/
EXPORT const int readSubBlockEntry(IDBDataFile* pFile,
DataBlock* block,
const uint64_t lbid,
@ -117,29 +121,31 @@ public:
const int width,
void* pStruct );
/**
* @brief Set an entry within a subblock
*/
/**
* @brief Set an entry within a subblock
*/
EXPORT void setSubBlockEntry( unsigned char* blockBuf,
const int sbid,
const int entryNo,
const int width,
const void* pStruct ) ;
/**
* @brief Set an entry within a subblock using block information
*/
/**
* @brief Set an entry within a subblock using block information
*/
void setSubBlockEntry( DataBlock* block,
const int sbid,
const int entryNo,
const int width,
const void* pStruct )
{ block->dirty = true;
setSubBlockEntry( block->data, sbid, entryNo, width, pStruct ); }
{
block->dirty = true;
setSubBlockEntry( block->data, sbid, entryNo, width, pStruct );
}
/**
* @brief Write a buffer to a DB file at the given LBID
*/
/**
* @brief Write a buffer to a DB file at the given LBID
*/
EXPORT virtual int writeDBFile( IDBDataFile* pFile,
const unsigned char* writeBuf,
const uint64_t lbid,
@ -149,59 +155,66 @@ public:
const uint64_t lbid,
const int numOfBlock = 1 );
/**
* @brief Write designated block(s) w/o writing to Version Buffer or cache.
*/
EXPORT int writeDBFileNoVBCache(CommBlock & cb,
const unsigned char * writeBuf,
const int fbo,
const int numOfBlock = 1);
EXPORT virtual int writeDBFileNoVBCache(IDBDataFile *pFile,
const unsigned char * writeBuf,
const int fbo,
const int numOfBlock = 1);
/**
* @brief Write designated block(s) w/o writing to Version Buffer or cache.
*/
EXPORT int writeDBFileNoVBCache(CommBlock& cb,
const unsigned char* writeBuf,
const int fbo,
const int numOfBlock = 1);
EXPORT virtual int writeDBFileNoVBCache(IDBDataFile* pFile,
const unsigned char* writeBuf,
const int fbo,
const int numOfBlock = 1);
int writeDBFile( IDBDataFile* pFile,
DataBlock* block,
const uint64_t lbid )
{ block->dirty=false; return writeDBFile( pFile, block->data, lbid ); }
{
block->dirty = false;
return writeDBFile( pFile, block->data, lbid );
}
int writeDBFile( CommBlock& cb,
DataBlock* block,
const uint64_t lbid )
{ return writeDBFile( cb, block->data, lbid ); }
{
return writeDBFile( cb, block->data, lbid );
}
EXPORT virtual int writeDBFileFbo( IDBDataFile* pFile,
const unsigned char* writeBuf,
const uint64_t fbo,
const int numOfBlock );
int writeDBFileNoVBCache(CommBlock & cb,
DataBlock * block,
const int fbo)
{ return writeDBFileNoVBCache(cb, block->data, fbo); }
int writeDBFileNoVBCache(CommBlock& cb,
DataBlock* block,
const int fbo)
{
return writeDBFileNoVBCache(cb, block->data, fbo);
}
/**
* @brief Write a sub block entry directly to a DB file
*/
/**
* @brief Write a sub block entry directly to a DB file
*/
EXPORT const int writeSubBlockEntry(IDBDataFile* pFile,
DataBlock* block,
const uint64_t lbid,
const int sbid,
const int entryNo,
const int width,
void* pStruct );
DataBlock* block,
const uint64_t lbid,
const int sbid,
const int entryNo,
const int width,
void* pStruct );
EXPORT const int writeSubBlockEntry(CommBlock& cb,
DataBlock* block,
const uint64_t lbid,
const int sbid,
const int entryNo,
const int width,
void* pStruct ) ;
DataBlock* block,
const uint64_t lbid,
const int sbid,
const int entryNo,
const int width,
void* pStruct ) ;
/**
* @brief Write to version buffer
*/
/**
* @brief Write to version buffer
*/
EXPORT const int writeVB( IDBDataFile* pFile,
const OID oid,
const uint64_t lbid );
@ -218,8 +231,14 @@ public:
EXPORT virtual IDBDataFile* getFilePtr(const Column& column,
bool useTmpSuffix);
virtual void chunkManager(ChunkManager* ptr) { m_chunkManager = ptr; }
virtual ChunkManager* chunkManager() { return m_chunkManager; }
virtual void chunkManager(ChunkManager* ptr)
{
m_chunkManager = ptr;
}
virtual ChunkManager* chunkManager()
{
return m_chunkManager;
}
protected:
ChunkManager* m_chunkManager;
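
Note: the accessors above address an entry by (sbid, entryNo, width) inside an 8 KB block. A plausible sketch of that addressing, assuming the BYTE_PER_SUBBLOCK (256) and ENTRY_PER_SUBBLOCK (32) constants from we_define.h, i.e. 8 bytes per entry slot; this is illustrative only and not taken from the WriteEngine implementation:

    #include <cstring>

    // Illustrative only: one plausible mapping of (sbid, entryNo, width)
    // into an 8192-byte block, assuming 256-byte subblocks holding 32
    // entry slots of 8 bytes each.
    void getSubBlockEntrySketch(const unsigned char* blockBuf, int sbid,
                                int entryNo, int width, void* pStruct)
    {
        const unsigned char* src = blockBuf + (sbid * 256) + (entryNo * 8);
        memcpy(pStruct, src, width);
    }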

View File

@ -29,11 +29,12 @@
namespace
{
const char* stateStrings[] = { "initState" ,
"PartialExtent" ,
"EmptyDbRoot" ,
"ExtentBoundary",
"OutOfService" };
const char* stateStrings[] = { "initState",
"PartialExtent",
"EmptyDbRoot",
"ExtentBoundary",
"OutOfService"
};
}
namespace WriteEngine
@ -68,6 +69,7 @@ bool DBRootExtentInfo::operator<(
{
if (fDbRoot < entry.fDbRoot)
return true;
return false;
}
@ -78,10 +80,10 @@ bool DBRootExtentInfo::operator<(
// before processing threads are spawned.
//------------------------------------------------------------------------------
DBRootExtentTracker::DBRootExtentTracker ( OID oid,
const std::vector<int>& colWidths,
const std::vector<BRM::EmDbRootHWMInfo_v>& dbRootHWMInfoColVec,
unsigned int columnIdx,
Log* logger ) :
const std::vector<int>& colWidths,
const std::vector<BRM::EmDbRootHWMInfo_v>& dbRootHWMInfoColVec,
unsigned int columnIdx,
Log* logger ) :
fOID(oid),
fLog(logger),
fCurrentDBRootIdx(-1),
@ -94,17 +96,18 @@ DBRootExtentTracker::DBRootExtentTracker ( OID oid,
int colWidth = colWidths[columnIdx];
fBlksPerExtent = (long long)BRMWrapper::getInstance()->getExtentRows() *
(long long)colWidth / (long long)BYTE_PER_BLOCK;
(long long)colWidth / (long long)BYTE_PER_BLOCK;
std::vector<bool> resetState;
for (unsigned int i=0; i<emDbRootHWMInfo.size(); i++)
for (unsigned int i = 0; i < emDbRootHWMInfo.size(); i++)
{
resetState.push_back(false);
DBRootExtentInfoState state = determineState(
colWidths[columnIdx],
emDbRootHWMInfo[i].localHWM,
emDbRootHWMInfo[i].totalBlocks,
emDbRootHWMInfo[i].status);
colWidths[columnIdx],
emDbRootHWMInfo[i].localHWM,
emDbRootHWMInfo[i].totalBlocks,
emDbRootHWMInfo[i].status);
// For a full extent...
// check to see if any of the column HWMs are partially full, in which
@ -114,19 +117,20 @@ DBRootExtentTracker::DBRootExtentTracker ( OID oid,
// still have free blocks for wider columns.)
if (state == DBROOT_EXTENT_EXTENT_BOUNDARY)
{
for (unsigned int kCol=0; kCol<dbRootHWMInfoColVec.size(); kCol++)
for (unsigned int kCol = 0; kCol < dbRootHWMInfoColVec.size(); kCol++)
{
const BRM::EmDbRootHWMInfo_v& emDbRootHWMInfo2 =
dbRootHWMInfoColVec[kCol];
DBRootExtentInfoState state2 = determineState(
colWidths[kCol],
emDbRootHWMInfo2[i].localHWM,
emDbRootHWMInfo2[i].totalBlocks,
emDbRootHWMInfo2[i].status);
colWidths[kCol],
emDbRootHWMInfo2[i].localHWM,
emDbRootHWMInfo2[i].totalBlocks,
emDbRootHWMInfo2[i].status);
if (state2 == DBROOT_EXTENT_PARTIAL_EXTENT)
{
state = DBROOT_EXTENT_PARTIAL_EXTENT;
resetState[ resetState.size()-1 ] = true;
resetState[ resetState.size() - 1 ] = true;
break;
}
}
@ -153,7 +157,8 @@ DBRootExtentTracker::DBRootExtentTracker ( OID oid,
{
std::ostringstream oss;
oss << "Starting DBRoot info for OID " << fOID;
for (unsigned int k=0; k<fDBRootExtentList.size(); k++)
for (unsigned int k = 0; k < fDBRootExtentList.size(); k++)
{
oss << std::endl;
oss << " DBRoot-" << fDBRootExtentList[k].fDbRoot <<
@ -164,9 +169,11 @@ DBRootExtentTracker::DBRootExtentTracker ( OID oid,
"/" << fDBRootExtentList[k].fStartLbid <<
"/" << fDBRootExtentList[k].fDBRootTotalBlocks <<
"/" << stateStrings[ fDBRootExtentList[k].fState ];
if (resetState[k])
oss << ".";
}
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
}
@ -177,9 +184,9 @@ DBRootExtentTracker::DBRootExtentTracker ( OID oid,
// current BRM status, HWM, and total block count for the DBRoot.
//------------------------------------------------------------------------------
DBRootExtentInfoState DBRootExtentTracker::determineState(int colWidth,
HWM localHwm,
uint64_t dbRootTotalBlocks,
int16_t status)
HWM localHwm,
uint64_t dbRootTotalBlocks,
int16_t status)
{
DBRootExtentInfoState extentState;
@ -201,9 +208,10 @@ DBRootExtentInfoState DBRootExtentTracker::determineState(int colWidth,
// is full and we won't be adding rows to the current HWM extent;
// we will instead need to allocate a new extent in order to begin
// adding any rows.
long long nRows= ((long long)(localHwm+1) *
(long long)BYTE_PER_BLOCK)/ (long long)colWidth;
long long nRows = ((long long)(localHwm + 1) *
(long long)BYTE_PER_BLOCK) / (long long)colWidth;
long long nRem = nRows % BRMWrapper::getInstance()->getExtentRows();
if (nRem == 0)
{
extentState = DBROOT_EXTENT_EXTENT_BOUNDARY;
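
Note: the boundary test above can be sanity-checked with a small worked example. The extent size used below (8,388,608 rows) is illustrative only, since getExtentRows() is configuration dependent; BYTE_PER_BLOCK = 8192 comes from we_define.h:

    // Illustrative values: colWidth = 4, localHwm = 4095
    long long nRows = (4095LL + 1) * 8192 / 4;  // = 8,388,608 rows up to the HWM
    long long nRem  = nRows % 8388608;          // = 0 -> DBROOT_EXTENT_EXTENT_BOUNDARY
    // Any nonzero remainder would instead leave the HWM extent partially filled.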
@ -249,24 +257,26 @@ int DBRootExtentTracker::selectFirstSegFile(
// we can end up with 2 partially filled HWM extents on 2 DBRoots, on the
// same PM. That's why we loop through the DBRoots to see if we have more
// than 1 partially filled HWM extent.
for (unsigned int iroot=0;
iroot<fDBRootExtentList.size();
iroot++)
for (unsigned int iroot = 0;
iroot < fDBRootExtentList.size();
iroot++)
{
// Skip over DBRoots which have no extents
if (fDBRootExtentList[iroot].fState == DBROOT_EXTENT_EMPTY_DBROOT)
continue;
fEmptyPM = false;
// Find DBRoot and segment file with most incomplete extent.
// Break a tie by selecting the lowest segment number.
long long remBlks = (long long)(fDBRootExtentList[iroot].fLocalHwm + 1)%
fBlksPerExtent;
long long remBlks = (long long)(fDBRootExtentList[iroot].fLocalHwm + 1) %
fBlksPerExtent;
if (remBlks > 0)
{
if ( (remBlks < fewestLocalBlks) ||
((remBlks == fewestLocalBlks) &&
(fDBRootExtentList[iroot].fSegment < fewestLocalBlkSegNum)) )
((remBlks == fewestLocalBlks) &&
(fDBRootExtentList[iroot].fSegment < fewestLocalBlkSegNum)) )
{
fewestLocalBlocksIdx = iroot;
fewestLocalBlks = remBlks;
@ -277,8 +287,8 @@ int DBRootExtentTracker::selectFirstSegFile(
// Find DBRoot with fewest total of blocks.
// Break a tie by selecting the highest segment number.
if ( (fDBRootExtentList[iroot].fDBRootTotalBlocks < fewestTotalBlks) ||
((fDBRootExtentList[iroot].fDBRootTotalBlocks== fewestTotalBlks) &&
(fDBRootExtentList[iroot].fSegment > fewestTotalBlkSegNum)) )
((fDBRootExtentList[iroot].fDBRootTotalBlocks == fewestTotalBlks) &&
(fDBRootExtentList[iroot].fSegment > fewestTotalBlkSegNum)) )
{
fewestTotalBlocksIdx = iroot;
fewestTotalBlks = fDBRootExtentList[iroot].fDBRootTotalBlocks;
@ -287,13 +297,14 @@ int DBRootExtentTracker::selectFirstSegFile(
}
// Select HWM extent with fewest number of blocks;
// If chosen extent is disabled, then treat like an empty PM,
// If chosen extent is disabled, then treat like an empty PM,
// meaning we have to allocate a new extent before adding any rows
if (fewestLocalBlocksIdx != -1)
{
startExtentIdx = fewestLocalBlocksIdx;
if (fDBRootExtentList[startExtentIdx].fState ==
DBROOT_EXTENT_OUT_OF_SERVICE)
DBROOT_EXTENT_OUT_OF_SERVICE)
{
fDisabledHWM = true;
}
@ -301,13 +312,14 @@ int DBRootExtentTracker::selectFirstSegFile(
// If the HWM on each DBRoot ends on an extent boundary, then
// select the DBRoot with the fewest total number of blocks;
// If chosen extent is disabled, then treat like an empty PM,
// If chosen extent is disabled, then treat like an empty PM,
// meaning we have to allocate a new extent before adding any rows
else if (fewestTotalBlocksIdx != -1)
{
startExtentIdx = fewestTotalBlocksIdx;
if (fDBRootExtentList[startExtentIdx].fState ==
DBROOT_EXTENT_OUT_OF_SERVICE)
DBROOT_EXTENT_OUT_OF_SERVICE)
{
fDisabledHWM = true;
}
@ -325,6 +337,7 @@ int DBRootExtentTracker::selectFirstSegFile(
if ((fEmptyOrDisabledPM) || (fDisabledHWM))
bNoStartExtentOnThisPM = true;
bEmptyPM = fEmptyPM;
fCurrentDBRootIdx = startExtentIdx;
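
Note: the selection above keeps two running minima with opposite tie-breaks: fewest remaining blocks in a partial HWM extent (tie broken by the lowest segment number) and fewest total blocks (tie broken by the highest segment number). A minimal sketch of those comparisons, using hypothetical helper names:

    #include <cstdint>

    // Hypothetical helpers mirroring the two tie-break rules above.
    bool betterPartialExtent(long long remBlks, uint16_t seg,
                             long long bestBlks, uint16_t bestSeg)
    {
        return (remBlks < bestBlks) ||
               ((remBlks == bestBlks) && (seg < bestSeg));   // prefer lowest segment
    }

    bool betterTotalBlocks(uint64_t totBlks, uint16_t seg,
                           uint64_t bestBlks, uint16_t bestSeg)
    {
        return (totBlks < bestBlks) ||
               ((totBlks == bestBlks) && (seg > bestSeg));   // prefer highest segment
    }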
@ -366,23 +379,23 @@ int DBRootExtentTracker::selectFirstSegFileForEmptyPM( std::string& errMsg )
//------------------------------------------------------------------------------
void DBRootExtentTracker::initEmptyDBRoots( )
{
int startExtentIdx= fCurrentDBRootIdx;
int startExtentIdx = fCurrentDBRootIdx;
bool bAnyChanges = false; // If fDBRootExtentList changes, log the contents
// Fill in starting partition for any DBRoots having no extents
for (unsigned int iroot=0;
iroot<fDBRootExtentList.size();
iroot++)
for (unsigned int iroot = 0;
iroot < fDBRootExtentList.size();
iroot++)
{
if ((fDBRootExtentList[iroot].fState == DBROOT_EXTENT_EMPTY_DBROOT) &&
((int)iroot != startExtentIdx)) // skip over selected dbroot
((int)iroot != startExtentIdx)) // skip over selected dbroot
{
if (fDBRootExtentList[iroot].fPartition !=
fDBRootExtentList[startExtentIdx].fPartition)
fDBRootExtentList[startExtentIdx].fPartition)
{
bAnyChanges = true;
fDBRootExtentList[iroot].fPartition =
fDBRootExtentList[iroot].fPartition =
fDBRootExtentList[startExtentIdx].fPartition;
}
}
@ -396,7 +409,8 @@ void DBRootExtentTracker::initEmptyDBRoots( )
{
std::ostringstream oss;
oss << "Updated starting (empty) DBRoot info for OID " << fOID;
for (unsigned int k=0; k<fDBRootExtentList.size(); k++)
for (unsigned int k = 0; k < fDBRootExtentList.size(); k++)
{
oss << std::endl;
oss << " DBRoot-" << fDBRootExtentList[k].fDbRoot <<
@ -408,6 +422,7 @@ void DBRootExtentTracker::initEmptyDBRoots( )
"/" << fDBRootExtentList[k].fDBRootTotalBlocks <<
"/" << stateStrings[ fDBRootExtentList[k].fState ];
}
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
}
@ -432,7 +447,7 @@ void DBRootExtentTracker::assignFirstSegFile(
fEmptyOrDisabledPM = refTracker.fEmptyOrDisabledPM;
fEmptyPM = refTracker.fEmptyPM;
fDisabledHWM = refTracker.fDisabledHWM;
// Always start empty PM with partition number 0. If the DBRoot has a HWM
// extent that is disabled, then BRM will override this partition number.
if (fEmptyOrDisabledPM)
@ -466,7 +481,7 @@ void DBRootExtentTracker::logFirstDBRootSelection( ) const
oss << "No active extents; will add partition to start adding "
"rows for oid-" << fOID <<
"; DBRoot-" << fDBRootExtentList[extentIdx].fDbRoot;
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
else if (fDisabledHWM)
{
@ -474,19 +489,19 @@ void DBRootExtentTracker::logFirstDBRootSelection( ) const
oss << "HWM extent disabled; will add partition to start adding "
"rows for oid-" << fOID <<
"; DBRoot-" << fDBRootExtentList[extentIdx].fDbRoot;
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
else
{
std::ostringstream oss;
oss<<"Selecting existing segFile to begin adding rows: oid-"<<fOID<<
oss << "Selecting existing segFile to begin adding rows: oid-" << fOID <<
"; DBRoot-" << fDBRootExtentList[extentIdx].fDbRoot <<
", part/seg/hwm/LBID/totBlks/state: " <<
fDBRootExtentList[extentIdx].fPartition <<
"/" << fDBRootExtentList[extentIdx].fSegment <<
"/" << fDBRootExtentList[extentIdx].fLocalHwm <<
"/" << fDBRootExtentList[extentIdx].fStartLbid <<
"/" << fDBRootExtentList[extentIdx].fDBRootTotalBlocks<<
"/" << fDBRootExtentList[extentIdx].fDBRootTotalBlocks <<
"/" << stateStrings[ fDBRootExtentList[extentIdx].fState ];
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
@ -517,8 +532,10 @@ bool DBRootExtentTracker::nextSegFile(
boost::mutex::scoped_lock lock(fDBRootExtTrkMutex);
fCurrentDBRootIdx++;
if ((unsigned int)fCurrentDBRootIdx >= fDBRootExtentList.size())
fCurrentDBRootIdx = 0;
dbRoot = fDBRootExtentList[fCurrentDBRootIdx].fDbRoot;
segment = fDBRootExtentList[fCurrentDBRootIdx].fSegment;
partition = fDBRootExtentList[fCurrentDBRootIdx].fPartition;
@ -534,8 +551,9 @@ bool DBRootExtentTracker::nextSegFile(
// << std::endl;
bool bAllocExtentFlag = true;
if (fDBRootExtentList[fCurrentDBRootIdx].fState ==
DBROOT_EXTENT_PARTIAL_EXTENT)
DBROOT_EXTENT_PARTIAL_EXTENT)
bAllocExtentFlag = false;
// After we have taken care of the "first" extent for each DBRoot, we can

View File

@ -49,7 +49,7 @@
namespace WriteEngine
{
class Log;
class Log;
//
// PARTIAL_EXTENT - Extent is partially filled
@ -121,10 +121,10 @@ public:
* @param logger Logger to be used for logging messages.
*/
EXPORT DBRootExtentTracker ( OID oid,
const std::vector<int>& colWidths,
const std::vector<BRM::EmDbRootHWMInfo_v>& dbRootHWMInfoColVec,
unsigned int columnIdx,
Log* logger );
const std::vector<int>& colWidths,
const std::vector<BRM::EmDbRootHWMInfo_v>& dbRootHWMInfoColVec,
unsigned int columnIdx,
Log* logger );
/** @brief Select the first DBRoot/segment file to add rows to, for this PM.
* @param dbRootExtent Dbroot/segment file selected for first set of rows.
@ -136,9 +136,9 @@ public:
* @return Returns NO_ERROR if success, else returns error code.
*/
EXPORT int selectFirstSegFile ( DBRootExtentInfo& dbRootExtent,
bool& bNoStartExtentOnThisPM,
bool& bEmptyPM,
std::string& errMsg );
bool& bNoStartExtentOnThisPM,
bool& bEmptyPM,
std::string& errMsg );
/** @brief Set up this Tracker to select the same first DBRoot/segment file
* as the reference DBRootExtentTracker that is specified from a ref column.
@ -149,7 +149,7 @@ public:
* @param dbRootExtent Dbroot/segment file selected for first set of rows.
*/
EXPORT void assignFirstSegFile( const DBRootExtentTracker& refTracker,
DBRootExtentInfo& dbRootExtent );
DBRootExtentInfo& dbRootExtent );
/** @brief Iterate/return next DBRoot to be used for the next extent.
*
@ -167,7 +167,7 @@ public:
* filled in extent instead of adding a new extent. Case 2 is intended to
* cover this use case.
* In this case, in the middle of an import, if the next extent to receive
* rows is a partially filled in extent, then the DBRoot, partition, and
* rows is a partially filled in extent, then the DBRoot, partition, and
* segment number for the partial extent are returned. In addition, the
* current HWM and starting LBID for the relevant extent are returned.
*
@ -189,10 +189,10 @@ public:
* if extent is partially full, and has room for more rows.
*/
EXPORT bool nextSegFile( uint16_t& dbRoot,
uint32_t& partition,
uint16_t& segment,
HWM& localHwm,
BRM::LBID_t& startLbid );
uint32_t& partition,
uint16_t& segment,
HWM& localHwm,
BRM::LBID_t& startLbid );
/** @brief get the DBRootExtentInfo list
*/
@ -208,9 +208,9 @@ public:
private:
DBRootExtentInfoState determineState(int colWidth,
HWM localHwm,
uint64_t dbRootTotalBlocks,
int16_t status);
HWM localHwm,
uint64_t dbRootTotalBlocks,
int16_t status);
// Select First DBRoot/segment file on a PM having no extents for fOID
int selectFirstSegFileForEmptyPM ( std::string& errMsg );
void initEmptyDBRoots(); // init ExtentList for empty DBRoots
@ -221,16 +221,16 @@ private:
Log* fLog; // logger
boost::mutex fDBRootExtTrkMutex; // mutex to access fDBRootExtentList
int fCurrentDBRootIdx; // Index into fDBRootExtentList,
// DBRoot where current extent is
// being added
// DBRoot where current extent is
// being added
std::vector<DBRootExtentInfo> fDBRootExtentList; // List of current pending
// DBRoot/extents for each DBRoot
// assigned to the local PM.
// DBRoot/extents for each DBRoot
// assigned to the local PM.
bool fEmptyOrDisabledPM; // true if PM has no extents or all
// extents are disabled
// extents are disabled
bool fEmptyPM; // true if PM has no available or
// disabled extents
bool fDisabledHWM; // Did job start with disabled HWM
// disabled extents
bool fDisabledHWM; // Did job start with disabled HWM
};
} //end of namespace
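
Note: taken together, the declarations above imply a calling pattern of picking a starting segment file and then iterating DBRoots for subsequent extents. A rough sketch under those declarations (oid, colWidths, dbRootHWMInfoColVec, and logger are assumed to exist; error handling omitted):

    DBRootExtentTracker tracker(oid, colWidths, dbRootHWMInfoColVec, 0, logger);

    DBRootExtentInfo startInfo;
    bool noStartExtentOnThisPM = false;
    bool emptyPM = false;
    std::string errMsg;

    if (tracker.selectFirstSegFile(startInfo, noStartExtentOnThisPM,
                                   emptyPM, errMsg) == NO_ERROR)
    {
        uint16_t dbRoot, segment;
        uint32_t partition;
        HWM localHwm;
        BRM::LBID_t startLbid;

        // true when the next extent is partially filled and can accept rows
        bool partialExtent = tracker.nextSegFile(dbRoot, partition, segment,
                                                 localHwm, startLbid);
    }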

View File

@ -62,7 +62,7 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_FILE_FBO_NEG] = " Specified file FBO is negative. ";
fErrorCodes[ERR_FILE_TRUNCATE] = " Error truncating db file. ";
fErrorCodes[ERR_FILE_DISK_SPACE] = "Not able to add extent; adding extent "
"would exceed max file system disk usage. ";
"would exceed max file system disk usage. ";
fErrorCodes[ERR_FILE_STAT] = " Error getting stats on db file. ";
fErrorCodes[ERR_VB_FILE_NOT_EXIST] = " Version buffer file does not exist.";
fErrorCodes[ERR_FILE_FLUSH] = " Error flushing db file. ";
@ -75,18 +75,18 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_XML_PARSE] = " An XML Parsing error";
// table lock level error
fErrorCodes[ERR_TBLLOCK_LOCK_NOT_FOUND]= "Table is not locked.";
fErrorCodes[ERR_TBLLOCK_LOCK_NOT_FOUND] = "Table is not locked.";
fErrorCodes[ERR_TBLLOCK_GET_LOCK] = "Error getting table lock.";
fErrorCodes[ERR_TBLLOCK_GET_LOCK_LOCKED]="Table locked by another user.";
fErrorCodes[ERR_TBLLOCK_GET_LOCK_LOCKED] = "Table locked by another user.";
fErrorCodes[ERR_TBLLOCK_RELEASE_LOCK] = "Error releasing table lock.";
fErrorCodes[ERR_TBLLOCK_CHANGE_STATE] = "Error changing table lock state.";
fErrorCodes[ERR_TBLLOCK_GET_INFO] = "Error getting table lock info.";
fErrorCodes[ERR_TBLLOCK_LOCKID_CONFLICT]="Table LockID for different table than expected.";
fErrorCodes[ERR_TBLLOCK_LOCKID_CONFLICT] = "Table LockID for different table than expected.";
// DDL/DML Interface level error
fErrorCodes[ERR_STRUCT_VALUE_NOT_MATCH] = " the number of structs does not match with the number of value sets";
fErrorCodes[ERR_ROWID_VALUE_NOT_MATCH] = " the number of rowids does not match with the number of values";
fErrorCodes[ERR_TBL_SYSCAT_ERROR] = "Error occurred when querying systemcatalog.";
fErrorCodes[ERR_TBL_SYSCAT_ERROR] = "Error occurred when querying systemcatalog.";
// index error
fErrorCodes[ERR_IDX_TREE_MOVE_ENTRY] = " an error in moving part of an index tree to a new subblock";
@ -98,7 +98,7 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
//index list error
fErrorCodes[ERR_IDX_LIST_INVALID_ADDHDR] = " a Create indexlist header error";
fErrorCodes[ERR_IDX_LIST_INVALID_UPDATE] = " an update Index List error ";
fErrorCodes[ERR_IDX_LIST_INVALID_DELETE] = " a Delete rowid in indexlist err";
fErrorCodes[ERR_IDX_LIST_INVALID_DELETE] = " a Delete rowid in indexlist err";
fErrorCodes[ERR_IDX_LIST_INVALID_KEY] = " an Invalid index listbppseeder.cpp Key passed";
fErrorCodes[ERR_IDX_LIST_GET_RID_ARRARY] = " an index list RID array";
fErrorCodes[ERR_IDX_LIST_WRONG_KEY ] = " a not matched Key passed to an index list";
@ -132,7 +132,7 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_FM_BAD_TYPE] = "an invalid type that must be pointer or list";
fErrorCodes[ERR_FM_NO_SPACE] = " that No blocks are available";
fErrorCodes[ERR_FM_EXTEND] = " while extending a file";
// Dictionary error
fErrorCodes[ERR_DICT_NO_SPACE_INSERT] = " no space for a dictionary insert";
fErrorCodes[ERR_DICT_SIZE_GT_2G] = " the dictionary size was > 2GB";
@ -140,7 +140,7 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_DICT_NO_OFFSET_DELETE] = " a dictionary bad Delete offset";
fErrorCodes[ERR_DICT_INVALID_HDR] = " a dictionary bad Delete Hdr";
fErrorCodes[ERR_DICT_ZERO_LEN] = " a dictionary zero len";
fErrorCodes[ERR_DICT_TOKEN_NOT_FOUND] = " a dictionary token not found";
fErrorCodes[ERR_DICT_TOKEN_NOT_FOUND] = " a dictionary token not found";
fErrorCodes[ERR_DICT_FILE_NOT_FOUND] = " a dictionary file not found";
fErrorCodes[ERR_DICT_BAD_TOKEN_LBID] = " a dictionary token lbid is bad";
fErrorCodes[ERR_DICT_BAD_TOKEN_OP] = " a dictionary token op is bad";
@ -196,10 +196,10 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_BRM_HWMS_NOT_EQUAL] = " HWMs for same width columns not equal. ";
fErrorCodes[ERR_BRM_HWMS_OUT_OF_SYNC] = " HWMs for different width columns not in sync. ";
fErrorCodes[ERR_BRM_DBROOT_HWMS] = " BRM error getting HWMs for DBRoots. ";
fErrorCodes[ERR_BRM_NETWORK] = " Network error in DBRM call. ";
fErrorCodes[ERR_BRM_READONLY] = " DBRM is read only. ";
fErrorCodes[ERR_INVALID_VBOID] = " The VB oid is invalid ";
fErrorCodes[ERR_BRM_SET_EXTENTS_CP] = " BRM error setting extents min/max ";
fErrorCodes[ERR_BRM_NETWORK] = " Network error in DBRM call. ";
fErrorCodes[ERR_BRM_READONLY] = " DBRM is read only. ";
fErrorCodes[ERR_INVALID_VBOID] = " The VB oid is invalid ";
fErrorCodes[ERR_BRM_SET_EXTENTS_CP] = " BRM error setting extents min/max ";
fErrorCodes[ERR_BRM_SHUTDOWN] = " The system is being shutdown ";
fErrorCodes[ERR_BRM_GET_SHUTDOWN] = " BRM error get the system shutdown flag ";
fErrorCodes[ERR_BRM_SUSPEND] = " The system is in write suspended mode";
@ -209,13 +209,13 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
// DM error
fErrorCodes[ERR_DM_CONVERT_OID] = " a DM Conversion error";
// Cache error
// Cache error
fErrorCodes[ERR_CACHE_KEY_EXIST ] = " a Cache key exists";
fErrorCodes[ERR_CACHE_KEY_NOT_EXIST] = " a Cache key does not exist";
fErrorCodes[ERR_NULL_BLOCK] = " a Block is NULL";
fErrorCodes[ERR_FREE_LIST_EMPTY] = " a Free list is empty";
// Compression error
// Compression error
fErrorCodes[ERR_COMP_COMPRESS] = " Error in compressing data. ";
fErrorCodes[ERR_COMP_UNCOMPRESS] = " Error in uncompressing data. ";
fErrorCodes[ERR_COMP_PARSE_HDRS] = " Error parsing compression headers. ";
@ -238,9 +238,9 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
// Auto-increment error
fErrorCodes[ERR_AUTOINC_GEN_EXCEED_MAX] = " Generated auto-increment value "
"exceeds maximum value for the column type.";
"exceeds maximum value for the column type.";
fErrorCodes[ERR_AUTOINC_USER_OUT_OF_RANGE] = " User specified auto-"
"increment value is out of range for the column type.";
"increment value is out of range for the column type.";
fErrorCodes[ERR_AUTOINC_TABLE_NAME] = " Invalid schema/tablename for auto increment. ";
fErrorCodes[ERR_AUTOINC_INIT1] = " Unable to initialize auto-increment value. ";
fErrorCodes[ERR_AUTOINC_INIT2] = " Unable to initialize auto-increment value. Unknown exception. ";
@ -258,9 +258,9 @@ WErrorCodes::WErrorCodes() : fErrorCodes()
fErrorCodes[ERR_METADATABKUP_FILE_RENAME] = " Unable to rename temporary bulk meta data file. ";
fErrorCodes[ERR_METADATABKUP_COMP_PARSE_HDRS] = " Error parsing compression headers in bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_VERIFY_HDRS] = " Error verifying compression headers in bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_CHUNK_NOT_FOUND]= " Error searching for compressed chunk in db file being backed up. ";
fErrorCodes[ERR_METADATABKUP_COMP_CHUNK_NOT_FOUND] = " Error searching for compressed chunk in db file being backed up. ";
fErrorCodes[ERR_METADATABKUP_COMP_OPEN_BULK_BKUP] = " Error opening compressed chunk in bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_WRITE_BULK_BKUP]= " Error writing compressed chunk to bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_WRITE_BULK_BKUP] = " Error writing compressed chunk to bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_READ_BULK_BKUP] = " Error reading compressed chunk from bulk backup file. ";
fErrorCodes[ERR_METADATABKUP_COMP_RENAME] = " Unable to rename compressed chunk bulk backup file. ";
}
@ -276,12 +276,13 @@ std::string WErrorCodes::errorString(int code)
std::string msgArg; // empty str arg; no extra info in this context
args.add( msgArg );
return logging::IDBErrorInfo::instance()->errorMsg(
logging::ERR_EXTENT_DISK_SPACE, args);
logging::ERR_EXTENT_DISK_SPACE, args);
break;
}
}
int brmRc = BRMWrapper::getBrmRc();
if (brmRc == BRM::ERR_OK)
return (fErrorCodes[code]);
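
Note: the branch above falls back to the static message table when no BRM return code is pending; the rest of the function (outside this hunk) handles the BRM case. A typical lookup, for illustration:

    WriteEngine::WErrorCodes ec;
    std::string msg = ec.errorString(WriteEngine::ERR_FILE_TRUNCATE);
    // msg: " Error truncating db file. " (per the table above)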

View File

@ -36,343 +36,343 @@
/** Namespace WriteEngine */
namespace WriteEngine
{
const short MAX_COLUMN_BOUNDARY = 8; // Max bytes for one column
const int MAX_SIGNATURE_SIZE = 8000; // Max len of dict sig val
const int MAX_FIELD_SIZE = 1000; // Max len non-dict fld val
const int MAX_DB_DIR_LEVEL = 6; // Max lvl of db dir struct
const int MAX_DB_DIR_NAME_SIZE = 20; // Max len of db dir size
const short ROW_PER_BYTE = 8; // Rows/byte in bitmap file
const int BYTE_PER_BLOCK = 8192; // Num bytes per data block
const int BYTE_PER_SUBBLOCK = 256; // Num bytes per sub block
const int ENTRY_PER_SUBBLOCK = 32; // Num entries per sub block
const int INITIAL_EXTENT_ROWS_TO_DISK = 256 * 1024;
// Num rows reserved to disk for 'initial' extent
const int FILE_NAME_SIZE = 200; // Max size of file name
const long long MAX_ALLOW_ERROR_COUNT = 100000; //Max allowable error count
const short MAX_COLUMN_BOUNDARY = 8; // Max bytes for one column
const int MAX_SIGNATURE_SIZE = 8000; // Max len of dict sig val
const int MAX_FIELD_SIZE = 1000; // Max len non-dict fld val
const int MAX_DB_DIR_LEVEL = 6; // Max lvl of db dir struct
const int MAX_DB_DIR_NAME_SIZE = 20; // Max len of db dir size
const short ROW_PER_BYTE = 8; // Rows/byte in bitmap file
const int BYTE_PER_BLOCK = 8192; // Num bytes per data block
const int BYTE_PER_SUBBLOCK = 256; // Num bytes per sub block
const int ENTRY_PER_SUBBLOCK = 32; // Num entries per sub block
const int INITIAL_EXTENT_ROWS_TO_DISK = 256 * 1024;
// Num rows reserved to disk for 'initial' extent
const int FILE_NAME_SIZE = 200; // Max size of file name
const long long MAX_ALLOW_ERROR_COUNT = 100000; //Max allowable error count
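
Note: the block-geometry constants above determine the derived layout; the arithmetic follows directly from the values shown:

    // BYTE_PER_BLOCK    / BYTE_PER_SUBBLOCK  = 8192 / 256 = 32 subblocks per block
    // BYTE_PER_SUBBLOCK / ENTRY_PER_SUBBLOCK =  256 /  32 =  8 bytes per entry slot
    //                                           (matching MAX_COLUMN_BOUNDARY = 8)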
//--------------------------------------------------------------------------
// Dictionary related constants
//--------------------------------------------------------------------------
const uint16_t DCTNRY_END_HEADER = 0xffff ; // end of header
const uint64_t NOT_USED_PTR = 0x0 ; // not continuous ptr
const int HDR_UNIT_SIZE = 2; // hdr unit size
const int NEXT_PTR_BYTES = 8; // const ptr size
const int MAX_OP_COUNT = 1024; // op max size
const int DCTNRY_HEADER_SIZE = 14; // header total size
const int MAX_STRING_CACHE_SIZE = 1000;
// End of Dictionary related constants
//--------------------------------------------------------------------------
// Dictionary related constants
//--------------------------------------------------------------------------
const uint16_t DCTNRY_END_HEADER = 0xffff ; // end of header
const uint64_t NOT_USED_PTR = 0x0 ; // not continuous ptr
const int HDR_UNIT_SIZE = 2; // hdr unit size
const int NEXT_PTR_BYTES = 8; // const ptr size
const int MAX_OP_COUNT = 1024; // op max size
const int DCTNRY_HEADER_SIZE = 14; // header total size
const int MAX_STRING_CACHE_SIZE = 1000;
// End of Dictionary related constants
const int COLPOSPAIR_NULL_TOKEN_OFFSET= -1; // offset value denoting a null token
const uint32_t BULK_SYSCAT_SESSION_ID = 0; // SessionID for syscat queries
const int COLPOSPAIR_NULL_TOKEN_OFFSET = -1; // offset value denoting a null token
const uint32_t BULK_SYSCAT_SESSION_ID = 0; // SessionID for syscat queries
const char COL_TYPE_DICT = 'D'; // Dictionary type
const char COL_TYPE_DICT = 'D'; // Dictionary type
const uint64_t INVALID_LBID = 0xFFFFFFFFFULL; // 2**36 - 1
const uint64_t INVALID_LBID = 0xFFFFFFFFFULL; // 2**36 - 1
const unsigned int SUBSYSTEM_ID_DDLPROC = 15;
const unsigned int SUBSYSTEM_ID_DMLPROC = 20;
const unsigned int SUBSYSTEM_ID_WE = 19;
const unsigned int SUBSYSTEM_ID_WE_SRV = 32;
const unsigned int SUBSYSTEM_ID_WE_SPLIT= 33;
const unsigned int SUBSYSTEM_ID_WE_BULK = 34;
const unsigned int SUBSYSTEM_ID_DDLPROC = 15;
const unsigned int SUBSYSTEM_ID_DMLPROC = 20;
const unsigned int SUBSYSTEM_ID_WE = 19;
const unsigned int SUBSYSTEM_ID_WE_SRV = 32;
const unsigned int SUBSYSTEM_ID_WE_SPLIT = 33;
const unsigned int SUBSYSTEM_ID_WE_BULK = 34;
//--------------------------------------------------------------------------
// Default definitions
//--------------------------------------------------------------------------
const int DEFAULT_CACHE_BLOCK = 256; // Max num of cache blocks
const int DEFAULT_CHK_INTERVAL = 3; // Checkpoint in seconds
const int DEFAULT_CACHE_PCT_FREE = 25; // Min % of free cache
const int DEFAULT_BUFSIZ = 1*1024*1024; // setvbuf buffer size
const int DEFAULT_COLSIZ = 8; // col size for hdfs rdwr buf
//--------------------------------------------------------------------------
// Default definitions
//--------------------------------------------------------------------------
const int DEFAULT_CACHE_BLOCK = 256; // Max num of cache blocks
const int DEFAULT_CHK_INTERVAL = 3; // Checkpoint in seconds
const int DEFAULT_CACHE_PCT_FREE = 25; // Min % of free cache
const int DEFAULT_BUFSIZ = 1 * 1024 * 1024; // setvbuf buffer size
const int DEFAULT_COLSIZ = 8; // col size for hdfs rdwr buf
const int BLK_INIT = 0;
const int BLK_READ = 1;
const int BLK_WRITE = 2;
const int BLK_INIT = 0;
const int BLK_READ = 1;
const int BLK_WRITE = 2;
//--------------------------------------------------------------------------
// Return code definitions
//--------------------------------------------------------------------------
const int NO_ERROR = 0; // No error
const int NOT_FOUND = -1; // Not found
const int INVALID_NUM = -1; // Invalid number
//--------------------------------------------------------------------------
// Return code definitions
//--------------------------------------------------------------------------
const int NO_ERROR = 0; // No error
const int NOT_FOUND = -1; // Not found
const int INVALID_NUM = -1; // Invalid number
//--------------------------------------------------------------------------
// Error code definition
//--------------------------------------------------------------------------
const int ERR_CODEBASE = 1000; // Generic error codes
const int ERR_FILEBASE = 1050; // File-related error codes
const int ERR_XMLBASE = 1150; // XML job file error codes
const int ERR_TBLLOCKBASE = 1200; // Table-lock error codes
const int ERR_WRAPPERBASE = 1250; // DDL/DML API related errors
const int ERR_INDEXBASE = 1300; // Index-related error codes
const int ERR_FMGRBASE = 1350; // Freemgr errors
const int ERR_DCTNRYBASE = 1400; // Dictionary errors
const int ERR_BULKBASE = 1450; // Bulk specific errors
const int ERR_BRMBASE = 1500; // BRM errors
const int ERR_DMBASE = 1550; // Disk manager errors
const int ERR_CACHEBASE = 1600; // Cache management errors
const int ERR_COMPBASE = 1650; // Compression errors
const int ERR_AUTOINCBASE = 1700; // Auto-increment errors
const int ERR_BLKCACHEBASE = 1750; // Block cache flush errors
const int ERR_METABKUPBASE = 1800; // Backup bulk meta file errors
//--------------------------------------------------------------------------
// Error code definition
//--------------------------------------------------------------------------
const int ERR_CODEBASE = 1000; // Generic error codes
const int ERR_FILEBASE = 1050; // File-related error codes
const int ERR_XMLBASE = 1150; // XML job file error codes
const int ERR_TBLLOCKBASE = 1200; // Table-lock error codes
const int ERR_WRAPPERBASE = 1250; // DDL/DML API related errors
const int ERR_INDEXBASE = 1300; // Index-related error codes
const int ERR_FMGRBASE = 1350; // Freemgr errors
const int ERR_DCTNRYBASE = 1400; // Dictionary errors
const int ERR_BULKBASE = 1450; // Bulk specific errors
const int ERR_BRMBASE = 1500; // BRM errors
const int ERR_DMBASE = 1550; // Disk manager errors
const int ERR_CACHEBASE = 1600; // Cache management errors
const int ERR_COMPBASE = 1650; // Compression errors
const int ERR_AUTOINCBASE = 1700; // Auto-increment errors
const int ERR_BLKCACHEBASE = 1750; // Block cache flush errors
const int ERR_METABKUPBASE = 1800; // Backup bulk meta file errors
//--------------------------------------------------------------------------
// Generic error
//--------------------------------------------------------------------------
const int ERR_UNKNOWN = ERR_CODEBASE + 1; // Generic error
const int ERR_INVALID_PARAM = ERR_CODEBASE + 2; // Invalid parms
const int ERR_STRUCT_EMPTY = ERR_CODEBASE + 3; // Struct is empty
const int ERR_VALUE_OUTOFRANGE = ERR_CODEBASE + 4; // Val out of range
const int ERR_PARSING = ERR_CODEBASE + 5; // Parsing error
const int ERR_NO_MEM = ERR_CODEBASE + 6; // Mem alloc error
const int ERR_DML_LOG_NAME = ERR_CODEBASE + 7; // DML log filename error
const int ERR_OPEN_DML_LOG = ERR_CODEBASE + 8; // Open DML log file error
const int ERR_HDFS_BACKUP = ERR_CODEBASE + 9; // HDFS backup error
//--------------------------------------------------------------------------
// Generic error
//--------------------------------------------------------------------------
const int ERR_UNKNOWN = ERR_CODEBASE + 1; // Generic error
const int ERR_INVALID_PARAM = ERR_CODEBASE + 2; // Invalid parms
const int ERR_STRUCT_EMPTY = ERR_CODEBASE + 3; // Struct is empty
const int ERR_VALUE_OUTOFRANGE = ERR_CODEBASE + 4; // Val out of range
const int ERR_PARSING = ERR_CODEBASE + 5; // Parsing error
const int ERR_NO_MEM = ERR_CODEBASE + 6; // Mem alloc error
const int ERR_DML_LOG_NAME = ERR_CODEBASE + 7; // DML log filename error
const int ERR_OPEN_DML_LOG = ERR_CODEBASE + 8; // Open DML log file error
const int ERR_HDFS_BACKUP = ERR_CODEBASE + 9; // HDFS backup error
//--------------------------------------------------------------------------
// File level error
//--------------------------------------------------------------------------
const int ERR_FILE_CREATE = ERR_FILEBASE + 1; // File creation error, mostly because file has already existed
const int ERR_FILE_OPEN = ERR_FILEBASE + 2; // Can not open the file, mostly because file not found
const int ERR_FILE_DELETE = ERR_FILEBASE + 3; // Can not delete the file, common reason is file not exist
const int ERR_FILE_EXIST = ERR_FILEBASE + 4; // File already exists
const int ERR_FILE_NOT_EXIST = ERR_FILEBASE + 5; // File not exists
const int ERR_FILE_NULL = ERR_FILEBASE + 6; // File is empty
const int ERR_FILE_WRITE = ERR_FILEBASE + 7; // Error writing to a DB file
const int ERR_FILE_READ = ERR_FILEBASE + 8; // Error reading from a DB file
const int ERR_FILE_SEEK = ERR_FILEBASE + 9; // Error in positioning file handle
const int ERR_FILE_READ_IMPORT = ERR_FILEBASE + 10;// Error reading import source file
const int ERR_DIR_CREATE = ERR_FILEBASE + 11;// Error in creating directory
const int ERR_FILE_NEW_EXTENT_FBO = ERR_FILEBASE + 12;// New extent fbo too large
const int ERR_FILE_FBO_NEG = ERR_FILEBASE + 13;// File FBO is negative
const int ERR_FILE_TRUNCATE = ERR_FILEBASE + 14;// Error truncating file
const int ERR_FILE_DISK_SPACE = ERR_FILEBASE + 15;// Out of space on file system
const int ERR_FILE_STAT = ERR_FILEBASE + 16;// Error getting stats on file
const int ERR_VB_FILE_NOT_EXIST = ERR_FILEBASE + 17;// Version buffer file not exists
const int ERR_FILE_FLUSH = ERR_FILEBASE + 18;// Error flushing file
const int ERR_FILE_GLOBBING = ERR_FILEBASE + 19;// Error globbing a file name
//--------------------------------------------------------------------------
// File level error
//--------------------------------------------------------------------------
const int ERR_FILE_CREATE = ERR_FILEBASE + 1; // File creation error, mostly because file has already existed
const int ERR_FILE_OPEN = ERR_FILEBASE + 2; // Can not open the file, mostly because file not found
const int ERR_FILE_DELETE = ERR_FILEBASE + 3; // Can not delete the file, common reason is file not exist
const int ERR_FILE_EXIST = ERR_FILEBASE + 4; // File already exists
const int ERR_FILE_NOT_EXIST = ERR_FILEBASE + 5; // File not exists
const int ERR_FILE_NULL = ERR_FILEBASE + 6; // File is empty
const int ERR_FILE_WRITE = ERR_FILEBASE + 7; // Error writing to a DB file
const int ERR_FILE_READ = ERR_FILEBASE + 8; // Error reading from a DB file
const int ERR_FILE_SEEK = ERR_FILEBASE + 9; // Error in positioning file handle
const int ERR_FILE_READ_IMPORT = ERR_FILEBASE + 10;// Error reading import source file
const int ERR_DIR_CREATE = ERR_FILEBASE + 11;// Error in creating directory
const int ERR_FILE_NEW_EXTENT_FBO = ERR_FILEBASE + 12;// New extent fbo too large
const int ERR_FILE_FBO_NEG = ERR_FILEBASE + 13;// File FBO is negative
const int ERR_FILE_TRUNCATE = ERR_FILEBASE + 14;// Error truncating file
const int ERR_FILE_DISK_SPACE = ERR_FILEBASE + 15;// Out of space on file system
const int ERR_FILE_STAT = ERR_FILEBASE + 16;// Error getting stats on file
const int ERR_VB_FILE_NOT_EXIST = ERR_FILEBASE + 17;// Version buffer file not exists
const int ERR_FILE_FLUSH = ERR_FILEBASE + 18;// Error flushing file
const int ERR_FILE_GLOBBING = ERR_FILEBASE + 19;// Error globbing a file name
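
Note: each error family occupies its own band above a numeric base, so absolute code values follow directly from the bases above; for example:

    // ERR_FILE_OPEN = ERR_FILEBASE + 2 = 1050 + 2 = 1052
    // ERR_FILE_SEEK = ERR_FILEBASE + 9 = 1050 + 9 = 1059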
//--------------------------------------------------------------------------
// XML level error
//--------------------------------------------------------------------------
const int ERR_XML_FILE = ERR_XMLBASE + 1; // File error, probably because file does not exist
const int ERR_XML_ROOT_ELEM = ERR_XMLBASE + 2; // Root element err
const int ERR_XML_EMPTY = ERR_XMLBASE + 3; // Empty XML file
const int ERR_XML_PARSE = ERR_XMLBASE + 4; // Parsing error
//--------------------------------------------------------------------------
// XML level error
//--------------------------------------------------------------------------
const int ERR_XML_FILE = ERR_XMLBASE + 1; // File error, probably because file does not exist
const int ERR_XML_ROOT_ELEM = ERR_XMLBASE + 2; // Root element err
const int ERR_XML_EMPTY = ERR_XMLBASE + 3; // Empty XML file
const int ERR_XML_PARSE = ERR_XMLBASE + 4; // Parsing error
//--------------------------------------------------------------------------
// table lock level error
//--------------------------------------------------------------------------
const int ERR_TBLLOCK_LOCK_NOT_FOUND = ERR_TBLLOCKBASE + 1; // table has no lock
const int ERR_TBLLOCK_GET_LOCK = ERR_TBLLOCKBASE + 2; // error acquiring a table lock
const int ERR_TBLLOCK_GET_LOCK_LOCKED= ERR_TBLLOCKBASE + 3; // table currently locked
const int ERR_TBLLOCK_RELEASE_LOCK = ERR_TBLLOCKBASE + 4; // error releasing a table lock
const int ERR_TBLLOCK_CHANGE_STATE = ERR_TBLLOCKBASE + 5; // error changing state of lock
const int ERR_TBLLOCK_GET_INFO = ERR_TBLLOCKBASE + 6; // error getting info about a lock
const int ERR_TBLLOCK_LOCKID_CONFLICT= ERR_TBLLOCKBASE + 7; // lockID for different table than expected
//--------------------------------------------------------------------------
// table lock level error
//--------------------------------------------------------------------------
const int ERR_TBLLOCK_LOCK_NOT_FOUND = ERR_TBLLOCKBASE + 1; // table has no lock
const int ERR_TBLLOCK_GET_LOCK = ERR_TBLLOCKBASE + 2; // error acquiring a table lock
const int ERR_TBLLOCK_GET_LOCK_LOCKED = ERR_TBLLOCKBASE + 3; // table currently locked
const int ERR_TBLLOCK_RELEASE_LOCK = ERR_TBLLOCKBASE + 4; // error releasing a table lock
const int ERR_TBLLOCK_CHANGE_STATE = ERR_TBLLOCKBASE + 5; // error changing state of lock
const int ERR_TBLLOCK_GET_INFO = ERR_TBLLOCKBASE + 6; // error getting info about a lock
const int ERR_TBLLOCK_LOCKID_CONFLICT = ERR_TBLLOCKBASE + 7; // lockID for different table than expected
//--------------------------------------------------------------------------
// DDL/DML Interface level error
//--------------------------------------------------------------------------
const int ERR_STRUCT_VALUE_NOT_MATCH = ERR_WRAPPERBASE + 1; // The number of struct not match with the number of value set
const int ERR_ROWID_VALUE_NOT_MATCH = ERR_WRAPPERBASE + 2; // The number of rowid not match with the number of values
const int ERR_TBL_SYSCAT_ERROR = ERR_WRAPPERBASE + 3; /** @brief Syscatalog query error */
//--------------------------------------------------------------------------
// DDL/DML Interface level error
//--------------------------------------------------------------------------
const int ERR_STRUCT_VALUE_NOT_MATCH = ERR_WRAPPERBASE + 1; // The number of struct not match with the number of value set
const int ERR_ROWID_VALUE_NOT_MATCH = ERR_WRAPPERBASE + 2; // The number of rowid not match with the number of values
const int ERR_TBL_SYSCAT_ERROR = ERR_WRAPPERBASE + 3; /** @brief Syscatalog query error */
//--------------------------------------------------------------------------
// index error
//--------------------------------------------------------------------------
const int ERR_IDX_TREE_MOVE_ENTRY = ERR_INDEXBASE + 1; // The error in move part of tree to a new subblock
const int ERR_IDX_TREE_INVALID_TYPE = ERR_INDEXBASE + 2; // Invalid tree entry type
const int ERR_IDX_TREE_BITTEST_VAL = ERR_INDEXBASE + 3; // Wrong bit test value in the entry
const int ERR_IDX_TREE_INVALID_LEVEL = ERR_INDEXBASE + 4; // Invalid testbit tree level
const int ERR_IDX_TREE_INVALID_GRP = ERR_INDEXBASE + 5; // Invalid group type
const int ERR_IDX_TREE_LISTPTR_CHANGE = ERR_INDEXBASE + 6; // List pointer change
//index list error
const int ERR_IDX_LIST_INVALID_ADDHDR = ERR_INDEXBASE + 7; // Create indexlist header error
const int ERR_IDX_LIST_INVALID_UPDATE = ERR_INDEXBASE + 8; // Update Index List error
const int ERR_IDX_LIST_INVALID_DELETE = ERR_INDEXBASE + 9; // Delete rowid in indexlist err*/
const int ERR_IDX_LIST_INVALID_KEY = ERR_INDEXBASE + 10;// Invalid Key passed
const int ERR_IDX_LIST_GET_RID_ARRARY = ERR_INDEXBASE + 11;// RID array
const int ERR_IDX_LIST_WRONG_KEY = ERR_INDEXBASE + 12;// not matched Key passed
const int ERR_IDX_LIST_HDR_EMPTY = ERR_INDEXBASE + 13;// Delete rowid in indexlist err
const int ERR_IDX_LIST_GET_SEGMT = ERR_INDEXBASE + 14;// Get Segment
const int ERR_IDX_LIST_WRONG_LBID_WRITE=ERR_INDEXBASE + 15;
const int ERR_IDX_LIST_UPDATE_SUB = ERR_INDEXBASE + 16;
const int ERR_IDX_LIST_UPDATE_NARRAY = ERR_INDEXBASE + 17;
const int ERR_IDX_LIST_LAST_FBO_NEG = ERR_INDEXBASE + 18;
const int ERR_IDX_LIST_INIT_NEW_BLKS = ERR_INDEXBASE + 19;
const int ERR_IDX_LIST_INIT_LINK_BLKS = ERR_INDEXBASE + 20;
const int ERR_IDX_LIST_UPDATE_COUNT = ERR_INDEXBASE + 21;
const int ERR_IDX_LIST_SET_NEXT_LBID = ERR_INDEXBASE + 22;
const int ERR_IDX_LIST_INVALID_LBID = ERR_INDEXBASE + 23;
const int ERR_IDX_LIST_INVALID_BLK_READ=ERR_INDEXBASE + 24;
const int ERR_IDX_LIST_UPDATE_HDR_COUNT=ERR_INDEXBASE + 25;
const int ERR_IDX_LIST_WRONG_BLK = ERR_INDEXBASE + 26;
const int ERR_IDX_LIST_WRONG_TYPE = ERR_INDEXBASE + 27;
const int ERR_IDX_LIST_GET_COUNT = ERR_INDEXBASE + 28;
const int ERR_IDX_LIST_GET_NEXT = ERR_INDEXBASE + 29;
const int ERR_IDX_LIST_GET_PARENT = ERR_INDEXBASE + 30;
const int ERR_IDX_LIST_GET_SUB_BLK = ERR_INDEXBASE + 31;
const int ERR_IDX_LIST_INVALID_UP_HDR = ERR_INDEXBASE + 32;// Update Index List error
const int ERR_IDX_LIST_INVALID_ADD_LIST=ERR_INDEXBASE + 33;// Update Index List error
const int ERR_IDX_LIST_INVALID_UP = ERR_INDEXBASE + 34;// Update Index List error
//--------------------------------------------------------------------------
// index error
//--------------------------------------------------------------------------
const int ERR_IDX_TREE_MOVE_ENTRY = ERR_INDEXBASE + 1; // The error in move part of tree to a new subblock
const int ERR_IDX_TREE_INVALID_TYPE = ERR_INDEXBASE + 2; // Invalid tree entry type
const int ERR_IDX_TREE_BITTEST_VAL = ERR_INDEXBASE + 3; // Wrong bit test value in the entry
const int ERR_IDX_TREE_INVALID_LEVEL = ERR_INDEXBASE + 4; // Invalid testbit tree level
const int ERR_IDX_TREE_INVALID_GRP = ERR_INDEXBASE + 5; // Invalid group type
const int ERR_IDX_TREE_LISTPTR_CHANGE = ERR_INDEXBASE + 6; // List pointer change
//index list error
const int ERR_IDX_LIST_INVALID_ADDHDR = ERR_INDEXBASE + 7; // Create indexlist header error
const int ERR_IDX_LIST_INVALID_UPDATE = ERR_INDEXBASE + 8; // Update Index List error
const int ERR_IDX_LIST_INVALID_DELETE = ERR_INDEXBASE + 9; // Delete rowid in indexlist err*/
const int ERR_IDX_LIST_INVALID_KEY = ERR_INDEXBASE + 10;// Invalid Key passed
const int ERR_IDX_LIST_GET_RID_ARRARY = ERR_INDEXBASE + 11;// RID array
const int ERR_IDX_LIST_WRONG_KEY = ERR_INDEXBASE + 12;// not matched Key passed
const int ERR_IDX_LIST_HDR_EMPTY = ERR_INDEXBASE + 13;// Delete rowid in indexlist err
const int ERR_IDX_LIST_GET_SEGMT = ERR_INDEXBASE + 14;// Get Segment
const int ERR_IDX_LIST_WRONG_LBID_WRITE = ERR_INDEXBASE + 15;
const int ERR_IDX_LIST_UPDATE_SUB = ERR_INDEXBASE + 16;
const int ERR_IDX_LIST_UPDATE_NARRAY = ERR_INDEXBASE + 17;
const int ERR_IDX_LIST_LAST_FBO_NEG = ERR_INDEXBASE + 18;
const int ERR_IDX_LIST_INIT_NEW_BLKS = ERR_INDEXBASE + 19;
const int ERR_IDX_LIST_INIT_LINK_BLKS = ERR_INDEXBASE + 20;
const int ERR_IDX_LIST_UPDATE_COUNT = ERR_INDEXBASE + 21;
const int ERR_IDX_LIST_SET_NEXT_LBID = ERR_INDEXBASE + 22;
const int ERR_IDX_LIST_INVALID_LBID = ERR_INDEXBASE + 23;
const int ERR_IDX_LIST_INVALID_BLK_READ = ERR_INDEXBASE + 24;
const int ERR_IDX_LIST_UPDATE_HDR_COUNT = ERR_INDEXBASE + 25;
const int ERR_IDX_LIST_WRONG_BLK = ERR_INDEXBASE + 26;
const int ERR_IDX_LIST_WRONG_TYPE = ERR_INDEXBASE + 27;
const int ERR_IDX_LIST_GET_COUNT = ERR_INDEXBASE + 28;
const int ERR_IDX_LIST_GET_NEXT = ERR_INDEXBASE + 29;
const int ERR_IDX_LIST_GET_PARENT = ERR_INDEXBASE + 30;
const int ERR_IDX_LIST_GET_SUB_BLK = ERR_INDEXBASE + 31;
const int ERR_IDX_LIST_INVALID_UP_HDR = ERR_INDEXBASE + 32;// Update Index List error
const int ERR_IDX_LIST_INVALID_ADD_LIST = ERR_INDEXBASE + 33; // Update Index List error
const int ERR_IDX_LIST_INVALID_UP = ERR_INDEXBASE + 34;// Update Index List error
//--------------------------------------------------------------------------
// freemgr error
//--------------------------------------------------------------------------
const int ERR_FM_ASSIGN_ERR = ERR_FMGRBASE + 1; // General assignment error
const int ERR_FM_RELEASE_ERR = ERR_FMGRBASE + 2; // General release error
const int ERR_FM_BAD_FBO = ERR_FMGRBASE + 3; // File Block Offset err
const int ERR_FM_BAD_TYPE = ERR_FMGRBASE + 4; // type must be pointer or list
const int ERR_FM_NO_SPACE = ERR_FMGRBASE + 5; // No blocks available
const int ERR_FM_EXTEND = ERR_FMGRBASE + 6; // Error extending file
//--------------------------------------------------------------------------
// Dictionary error
//--------------------------------------------------------------------------
const int ERR_DICT_NO_SPACE_INSERT= ERR_DCTNRYBASE+ 1; // ins no space
const int ERR_DICT_SIZE_GT_2G = ERR_DCTNRYBASE+ 2; // ins size >8000
const int ERR_DICT_NO_OP_DELETE = ERR_DCTNRYBASE+ 3; // del no op
const int ERR_DICT_NO_OFFSET_DELETE=ERR_DCTNRYBASE+ 4; // del bad offset
const int ERR_DICT_INVALID_HDR = ERR_DCTNRYBASE+ 5; // Delete Hdr
const int ERR_DICT_ZERO_LEN = ERR_DCTNRYBASE+ 6; // Delete zero len
const int ERR_DICT_TOKEN_NOT_FOUND= ERR_DCTNRYBASE+ 7; // token not found
const int ERR_DICT_FILE_NOT_FOUND = ERR_DCTNRYBASE+ 8; // dict file not found
const int ERR_DICT_BAD_TOKEN_LBID = ERR_DCTNRYBASE+ 9; // bad token lbid
const int ERR_DICT_BAD_TOKEN_OP = ERR_DCTNRYBASE+ 10;// token op is bad
//--------------------------------------------------------------------------
// freemgr error
//--------------------------------------------------------------------------
const int ERR_FM_ASSIGN_ERR = ERR_FMGRBASE + 1; // General assignment error
const int ERR_FM_RELEASE_ERR = ERR_FMGRBASE + 2; // General release error
const int ERR_FM_BAD_FBO = ERR_FMGRBASE + 3; // File Block Offset err
const int ERR_FM_BAD_TYPE = ERR_FMGRBASE + 4; // type must be pointer or list
const int ERR_FM_NO_SPACE = ERR_FMGRBASE + 5; // No blocks available
const int ERR_FM_EXTEND = ERR_FMGRBASE + 6; // Error extending file
//--------------------------------------------------------------------------
// Bulk error
//--------------------------------------------------------------------------
const int ERR_BULK_MAX_ERR_NUM = ERR_BULKBASE + 1; // Maximum number of error rows reached
const int ERR_BULK_DATA_COL_NUM = ERR_BULKBASE + 2; // The total number of data column not match with column definitions
const int ERR_BULK_SEND_MSG_ERR = ERR_BULKBASE + 3; // send msg to primproc to flush cache
const int ERR_BULK_MISSING_EXTENT_ENTRY=ERR_BULKBASE + 4; // Missing Extent Entry when trying to save LBID info
const int ERR_BULK_MISSING_EXTENT_ROW = ERR_BULKBASE + 5; // Missing Extent Row when trying to save LBID info
const int ERR_BULK_ROW_FILL_BUFFER = ERR_BULKBASE + 6; // Single row fills read buffer
const int ERR_BULK_DBROOT_CHANGE = ERR_BULKBASE + 7; // Local DBRoot settings changed during an import
const int ERR_BULK_ROLLBACK_MISS_ROOT = ERR_BULKBASE + 8; // Mode3 automatic rollback skipped with missing DBRoot
const int ERR_BULK_ROLLBACK_SEG_LIST = ERR_BULKBASE + 9; // Error building segment file list in a directory
const int ERR_BULK_BINARY_PARTIAL_REC = ERR_BULKBASE + 10;// Binary input did not end on fixed length record boundary
const int ERR_BULK_BINARY_IGNORE_FLD = ERR_BULKBASE + 11;// <IgnoreField> tag not supported for binary import
//--------------------------------------------------------------------------
// Dictionary error
//--------------------------------------------------------------------------
const int ERR_DICT_NO_SPACE_INSERT = ERR_DCTNRYBASE + 1; // ins no space
const int ERR_DICT_SIZE_GT_2G = ERR_DCTNRYBASE + 2; // ins size >8000
const int ERR_DICT_NO_OP_DELETE = ERR_DCTNRYBASE + 3; // del no op
const int ERR_DICT_NO_OFFSET_DELETE = ERR_DCTNRYBASE + 4; // del bad offset
const int ERR_DICT_INVALID_HDR = ERR_DCTNRYBASE + 5; // Delete Hdr
const int ERR_DICT_ZERO_LEN = ERR_DCTNRYBASE + 6; // Delete zero len
const int ERR_DICT_TOKEN_NOT_FOUND = ERR_DCTNRYBASE + 7; // token not found
const int ERR_DICT_FILE_NOT_FOUND = ERR_DCTNRYBASE + 8; // dict file not found
const int ERR_DICT_BAD_TOKEN_LBID = ERR_DCTNRYBASE + 9; // bad token lbid
const int ERR_DICT_BAD_TOKEN_OP = ERR_DCTNRYBASE + 10; // token op is bad
//--------------------------------------------------------------------------
// BRM error
//--------------------------------------------------------------------------
const int ERR_BRM_LOOKUP_LBID = ERR_BRMBASE + 1; // Lookup LBID error
const int ERR_BRM_LOOKUP_FBO = ERR_BRMBASE + 2; // Lookup FBO error
const int ERR_BRM_ALLOC_EXTEND = ERR_BRMBASE + 3; // Allocate extent error
const int ERR_BRM_COMMIT = ERR_BRMBASE + 4; // Commit error
const int ERR_BRM_ROLLBACK = ERR_BRMBASE + 5; // Rollback error
const int ERR_BRM_GET_UNCOMM_LBID = ERR_BRMBASE + 6; // Get uncommitted lbid list error
const int ERR_BRM_DEL_OID = ERR_BRMBASE + 7; // Delete oid error
const int ERR_BRM_BEGIN_COPY = ERR_BRMBASE + 8; // Begin copy error
const int ERR_BRM_END_COPY = ERR_BRMBASE + 9; // End copy error
const int ERR_BRM_GET_HWM = ERR_BRMBASE + 10;// Get hwm error
const int ERR_BRM_SET_HWM = ERR_BRMBASE + 11;// Set hwm error
const int ERR_BRM_WR_VB_ENTRY = ERR_BRMBASE + 12;// Write VB entry error
const int ERR_BRM_VB_COPY_READ = ERR_BRMBASE + 13;// VB copy read error
const int ERR_BRM_VB_COPY_SEEK_DB = ERR_BRMBASE + 14;// VB copy seek error to DB file
const int ERR_BRM_VB_COPY_SEEK_VB = ERR_BRMBASE + 15;// VB copy seek error to VB file
const int ERR_BRM_VB_COPY_WRITE = ERR_BRMBASE + 16;// VB copy write
const int ERR_BRM_DEAD_LOCK = ERR_BRMBASE + 17;// DEAD lock error
const int ERR_BRM_MARK_INVALID = ERR_BRMBASE + 18;// Mark extent invalid error from casual partitioning
const int ERR_BRM_SAVE_STATE = ERR_BRMBASE + 19;// Save state error
const int ERR_BRM_GET_START_EXTENT= ERR_BRMBASE + 20;// Get starting Extent error
const int ERR_BRM_VB_OVERFLOW = ERR_BRMBASE + 21;// Version buffer overflow
const int ERR_BRM_READ_ONLY = ERR_BRMBASE + 22;// BRM is in READ-ONLY state
const int ERR_BRM_GET_READ_WRITE = ERR_BRMBASE + 23;// error getting BRM READ/WRITE state
const int ERR_BRM_BULK_RB_COLUMN = ERR_BRMBASE + 24;// error during column bulk rollback
const int ERR_BRM_BULK_RB_DCTNRY = ERR_BRMBASE + 25;// error during dctnry bulk rollback
const int ERR_BRM_DELETE_EXTENT_COLUMN= ERR_BRMBASE + 26;// error during delete column extents
const int ERR_BRM_DELETE_EXTENT_DCTNRY= ERR_BRMBASE + 27;// error during delete dictionary extents
const int ERR_BRM_TAKE_SNAPSHOT = ERR_BRMBASE + 28;// Taking snapshot of BRM state
const int ERR_BRM_LOOKUP_START_LBID=ERR_BRMBASE + 29;// Lookup starting LBID error
const int ERR_BRM_BULK_UPDATE = ERR_BRMBASE + 30;// Error with bulk update of HWM and CP
const int ERR_BRM_GET_EXT_STATE = ERR_BRMBASE + 31;// Error getting extent state
const int ERR_EXTENTMAP_LOOKUP = ERR_BRMBASE + 32;// Lookup extent map error
const int ERR_BRM_LOOKUP_VERSION = ERR_BRMBASE + 33;// Lookup version error
const int ERR_BRM_LOOKUP_LBID_RANGES = ERR_BRMBASE + 34;// Lookup LBID Ranges error
const int ERR_BRM_HWMS_NOT_EQUAL = ERR_BRMBASE + 35;// HWMs of same col width not equal
const int ERR_BRM_HWMS_OUT_OF_SYNC= ERR_BRMBASE + 36;// HWMs for dif col width not in sync
const int ERR_BRM_DBROOT_HWMS = ERR_BRMBASE + 37;// Error getting HWMs for each DBRoot
const int ERR_BRM_NETWORK = ERR_BRMBASE + 38;// Network error when calling BRM functions
const int ERR_BRM_READONLY = ERR_BRMBASE + 39;// DBRM is readonly
const int ERR_INVALID_VBOID = ERR_BRMBASE + 40;// returned if the given vboid is invalid
const int ERR_BRM_SET_EXTENTS_CP = ERR_BRMBASE + 41;// Error setting extents min/max
const int ERR_BRM_SHUTDOWN = ERR_BRMBASE + 42;// BRM is set to shutdown
const int ERR_BRM_GET_SHUTDOWN = ERR_BRMBASE + 43;// error getting BRM Shutdown flag
const int ERR_BRM_SUSPEND = ERR_BRMBASE + 44;// BRM is set to Suspend writes
const int ERR_BRM_GET_SUSPEND = ERR_BRMBASE + 45;// error getting BRM Suspend flag
const int ERR_BRM_BAD_STRIPE_CNT = ERR_BRMBASE + 46;// Incorrect num of cols allocated in stripe
//--------------------------------------------------------------------------
// Bulk error
//--------------------------------------------------------------------------
const int ERR_BULK_MAX_ERR_NUM = ERR_BULKBASE + 1; // Maximum number of error rows reached
const int ERR_BULK_DATA_COL_NUM = ERR_BULKBASE + 2; // The total number of data column not match with column definitions
const int ERR_BULK_SEND_MSG_ERR = ERR_BULKBASE + 3; // send msg to primproc to flush cache
const int ERR_BULK_MISSING_EXTENT_ENTRY = ERR_BULKBASE + 4; // Missing Extent Entry when trying to save LBID info
const int ERR_BULK_MISSING_EXTENT_ROW = ERR_BULKBASE + 5; // Missing Extent Row when trying to save LBID info
const int ERR_BULK_ROW_FILL_BUFFER = ERR_BULKBASE + 6; // Single row fills read buffer
const int ERR_BULK_DBROOT_CHANGE = ERR_BULKBASE + 7; // Local DBRoot settings changed during an import
const int ERR_BULK_ROLLBACK_MISS_ROOT = ERR_BULKBASE + 8; // Mode3 automatic rollback skipped with missing DBRoot
const int ERR_BULK_ROLLBACK_SEG_LIST = ERR_BULKBASE + 9; // Error building segment file list in a directory
const int ERR_BULK_BINARY_PARTIAL_REC = ERR_BULKBASE + 10;// Binary input did not end on fixed length record boundary
const int ERR_BULK_BINARY_IGNORE_FLD = ERR_BULKBASE + 11;// <IgnoreField> tag not supported for binary import
//--------------------------------------------------------------------------
// DM error
//--------------------------------------------------------------------------
const int ERR_DM_CONVERT_OID = ERR_DMBASE + 1; // Conversion error
//--------------------------------------------------------------------------
// BRM error
//--------------------------------------------------------------------------
const int ERR_BRM_LOOKUP_LBID = ERR_BRMBASE + 1; // Lookup LBID error
const int ERR_BRM_LOOKUP_FBO = ERR_BRMBASE + 2; // Lookup FBO error
const int ERR_BRM_ALLOC_EXTEND = ERR_BRMBASE + 3; // Allocate extent error
const int ERR_BRM_COMMIT = ERR_BRMBASE + 4; // Commit error
const int ERR_BRM_ROLLBACK = ERR_BRMBASE + 5; // Rollback error
const int ERR_BRM_GET_UNCOMM_LBID = ERR_BRMBASE + 6; // Get uncommitted lbid list error
const int ERR_BRM_DEL_OID = ERR_BRMBASE + 7; // Delete oid error
const int ERR_BRM_BEGIN_COPY = ERR_BRMBASE + 8; // Begin copy error
const int ERR_BRM_END_COPY = ERR_BRMBASE + 9; // End copy error
const int ERR_BRM_GET_HWM = ERR_BRMBASE + 10;// Get hwm error
const int ERR_BRM_SET_HWM = ERR_BRMBASE + 11;// Set hwm error
const int ERR_BRM_WR_VB_ENTRY = ERR_BRMBASE + 12;// Write VB entry error
const int ERR_BRM_VB_COPY_READ = ERR_BRMBASE + 13;// VB copy read error
const int ERR_BRM_VB_COPY_SEEK_DB = ERR_BRMBASE + 14;// VB copy seek error to DB file
const int ERR_BRM_VB_COPY_SEEK_VB = ERR_BRMBASE + 15;// VB copy seek error to VB file
const int ERR_BRM_VB_COPY_WRITE = ERR_BRMBASE + 16;// VB copy write
const int ERR_BRM_DEAD_LOCK = ERR_BRMBASE + 17;// DEAD lock error
const int ERR_BRM_MARK_INVALID = ERR_BRMBASE + 18;// Mark extent invalid error from casual partitioning
const int ERR_BRM_SAVE_STATE = ERR_BRMBASE + 19;// Save state error
const int ERR_BRM_GET_START_EXTENT = ERR_BRMBASE + 20; // Get starting Extent error
const int ERR_BRM_VB_OVERFLOW = ERR_BRMBASE + 21;// Version buffer overflow
const int ERR_BRM_READ_ONLY = ERR_BRMBASE + 22;// BRM is in READ-ONLY state
const int ERR_BRM_GET_READ_WRITE = ERR_BRMBASE + 23;// error getting BRM READ/WRITE state
const int ERR_BRM_BULK_RB_COLUMN = ERR_BRMBASE + 24;// error during column bulk rollback
const int ERR_BRM_BULK_RB_DCTNRY = ERR_BRMBASE + 25;// error during dctnry bulk rollback
const int ERR_BRM_DELETE_EXTENT_COLUMN = ERR_BRMBASE + 26; // error during delete column extents
const int ERR_BRM_DELETE_EXTENT_DCTNRY = ERR_BRMBASE + 27; // error during delete dictionary extents
const int ERR_BRM_TAKE_SNAPSHOT = ERR_BRMBASE + 28;// Taking snapshot of BRM state
const int ERR_BRM_LOOKUP_START_LBID = ERR_BRMBASE + 29; // Lookup starting LBID error
const int ERR_BRM_BULK_UPDATE = ERR_BRMBASE + 30;// Error with bulk update of HWM and CP
const int ERR_BRM_GET_EXT_STATE = ERR_BRMBASE + 31;// Error getting extent state
const int ERR_EXTENTMAP_LOOKUP = ERR_BRMBASE + 32;// Lookup extent map error
const int ERR_BRM_LOOKUP_VERSION = ERR_BRMBASE + 33;// Lookup version error
const int ERR_BRM_LOOKUP_LBID_RANGES = ERR_BRMBASE + 34;// Lookup LBID Ranges error
const int ERR_BRM_HWMS_NOT_EQUAL = ERR_BRMBASE + 35;// HWMs of same col width not equal
const int ERR_BRM_HWMS_OUT_OF_SYNC = ERR_BRMBASE + 36; // HWMs for dif col width not in sync
const int ERR_BRM_DBROOT_HWMS = ERR_BRMBASE + 37;// Error getting HWMs for each DBRoot
const int ERR_BRM_NETWORK = ERR_BRMBASE + 38;// Network error when calling BRM functions
const int ERR_BRM_READONLY = ERR_BRMBASE + 39;// DBRM is readonly
const int ERR_INVALID_VBOID = ERR_BRMBASE + 40;// returned if the given vboid is invalid
const int ERR_BRM_SET_EXTENTS_CP = ERR_BRMBASE + 41;// Error setting extents min/max
const int ERR_BRM_SHUTDOWN = ERR_BRMBASE + 42;// BRM is set to shutdown
const int ERR_BRM_GET_SHUTDOWN = ERR_BRMBASE + 43;// error getting BRM Shutdown flag
const int ERR_BRM_SUSPEND = ERR_BRMBASE + 44;// BRM is set to Suspend writes
const int ERR_BRM_GET_SUSPEND = ERR_BRMBASE + 45;// error getting BRM Suspend flag
const int ERR_BRM_BAD_STRIPE_CNT = ERR_BRMBASE + 46;// Incorrect num of cols allocated in stripe
//--------------------------------------------------------------------------
// Cache error
//--------------------------------------------------------------------------
const int ERR_CACHE_KEY_EXIST = ERR_CACHEBASE + 1; // Cache key exists
const int ERR_CACHE_KEY_NOT_EXIST = ERR_CACHEBASE + 2; // Cache key does not exist
const int ERR_NULL_BLOCK = ERR_CACHEBASE + 3; // Block is NULL
const int ERR_FREE_LIST_EMPTY = ERR_CACHEBASE + 4; // Empty Free list
//--------------------------------------------------------------------------
// DM error
//--------------------------------------------------------------------------
const int ERR_DM_CONVERT_OID = ERR_DMBASE + 1; // Conversion error
//--------------------------------------------------------------------------
// Compression error
//--------------------------------------------------------------------------
const int ERR_COMP_COMPRESS = ERR_COMPBASE + 1; // Error compressing data
const int ERR_COMP_UNCOMPRESS = ERR_COMPBASE + 2; // Error uncompressing data
const int ERR_COMP_PARSE_HDRS = ERR_COMPBASE + 3; // Error parsing compression headers
const int ERR_COMP_VERIFY_HDRS = ERR_COMPBASE + 4; // Error verifying compression headers
const int ERR_COMP_PAD_DATA = ERR_COMPBASE + 5; // Pad compressed data failed
const int ERR_COMP_READ_BLOCK = ERR_COMPBASE + 6; // Failed to read a block
const int ERR_COMP_SAVE_BLOCK = ERR_COMPBASE + 7; // Failed to save a block
const int ERR_COMP_WRONG_PTR = ERR_COMPBASE + 8; // Pointer in header is wrong
const int ERR_COMP_FILE_NOT_FOUND = ERR_COMPBASE + 9; // File not found in map
const int ERR_COMP_CHUNK_NOT_FOUND= ERR_COMPBASE + 10;// Chunk not found in map
const int ERR_COMP_UNAVAIL_TYPE = ERR_COMPBASE + 11;// Unavailable compression type
const int ERR_COMP_REMOVE_FILE = ERR_COMPBASE + 12;// Failed to remove a file
const int ERR_COMP_RENAME_FILE = ERR_COMPBASE + 13;// Failed to rename a file
const int ERR_COMP_OPEN_FILE = ERR_COMPBASE + 14;// Failed to open a compressed data file
const int ERR_COMP_SET_OFFSET = ERR_COMPBASE + 15;// Failed to set offset in a compressed data file
const int ERR_COMP_READ_FILE = ERR_COMPBASE + 16;// Failed to read from a compressed data file
const int ERR_COMP_WRITE_FILE = ERR_COMPBASE + 17;// Failed to write to a compressed data file
const int ERR_COMP_CLOSE_FILE = ERR_COMPBASE + 18;// Failed to close a compressed data file
const int ERR_COMP_TRUNCATE_ZERO = ERR_COMPBASE + 19;// Invalid attempt to truncate file to 0 bytes
//--------------------------------------------------------------------------
// Cache error
//--------------------------------------------------------------------------
const int ERR_CACHE_KEY_EXIST = ERR_CACHEBASE + 1; // Cache key exists
const int ERR_CACHE_KEY_NOT_EXIST = ERR_CACHEBASE + 2; // Cache key does not exist
const int ERR_NULL_BLOCK = ERR_CACHEBASE + 3; // Block is NULL
const int ERR_FREE_LIST_EMPTY = ERR_CACHEBASE + 4; // Empty Free list
//--------------------------------------------------------------------------
// Auto-increment error
//--------------------------------------------------------------------------
const int ERR_AUTOINC_GEN_EXCEED_MAX = ERR_AUTOINCBASE + 1; // Generated autoinc value exceeds max auto increment value
const int ERR_AUTOINC_USER_OUT_OF_RANGE=ERR_AUTOINCBASE + 2; // User specified autoinc value is out of range
const int ERR_AUTOINC_TABLE_NAME = ERR_AUTOINCBASE + 3; // Invalid schema/tablename for auto increment
const int ERR_AUTOINC_INIT1 = ERR_AUTOINCBASE + 4; // Error initializing auto increment (known exception)
const int ERR_AUTOINC_INIT2 = ERR_AUTOINCBASE + 5; // Error initializing auto increment (unknown exception)
const int ERR_AUTOINC_RID = ERR_AUTOINCBASE + 6; // Error initializing auto increment (unknown exception)
const int ERR_AUTOINC_START_SEQ = ERR_AUTOINCBASE + 7; // Error setting up an auto-increment sequence
const int ERR_AUTOINC_GET_RANGE = ERR_AUTOINCBASE + 8; // Error reserving an auto-increment range
const int ERR_AUTOINC_GET_LOCK = ERR_AUTOINCBASE + 9; // Error getting a lock to update auto-inc next value
const int ERR_AUTOINC_REL_LOCK = ERR_AUTOINCBASE +10; // Error releasing lock to update auto-inc next value
const int ERR_AUTOINC_UPDATE = ERR_AUTOINCBASE +11; // Error updating nextValue in system catalog
//--------------------------------------------------------------------------
// Compression error
//--------------------------------------------------------------------------
const int ERR_COMP_COMPRESS = ERR_COMPBASE + 1; // Error compressing data
const int ERR_COMP_UNCOMPRESS = ERR_COMPBASE + 2; // Error uncompressing data
const int ERR_COMP_PARSE_HDRS = ERR_COMPBASE + 3; // Error parsing compression headers
const int ERR_COMP_VERIFY_HDRS = ERR_COMPBASE + 4; // Error verifying compression headers
const int ERR_COMP_PAD_DATA = ERR_COMPBASE + 5; // Pad compressed data failed
const int ERR_COMP_READ_BLOCK = ERR_COMPBASE + 6; // Failed to read a block
const int ERR_COMP_SAVE_BLOCK = ERR_COMPBASE + 7; // Failed to save a block
const int ERR_COMP_WRONG_PTR = ERR_COMPBASE + 8; // Pointer in header is wrong
const int ERR_COMP_FILE_NOT_FOUND = ERR_COMPBASE + 9; // File not found in map
const int ERR_COMP_CHUNK_NOT_FOUND = ERR_COMPBASE + 10; // Chunk not found in map
const int ERR_COMP_UNAVAIL_TYPE = ERR_COMPBASE + 11;// Unavailable compression type
const int ERR_COMP_REMOVE_FILE = ERR_COMPBASE + 12;// Failed to remove a file
const int ERR_COMP_RENAME_FILE = ERR_COMPBASE + 13;// Failed to rename a file
const int ERR_COMP_OPEN_FILE = ERR_COMPBASE + 14;// Failed to open a compressed data file
const int ERR_COMP_SET_OFFSET = ERR_COMPBASE + 15;// Failed to set offset in a compressed data file
const int ERR_COMP_READ_FILE = ERR_COMPBASE + 16;// Failed to read from a compressed data file
const int ERR_COMP_WRITE_FILE = ERR_COMPBASE + 17;// Failed to write to a compressed data file
const int ERR_COMP_CLOSE_FILE = ERR_COMPBASE + 18;// Failed to close a compressed data file
const int ERR_COMP_TRUNCATE_ZERO = ERR_COMPBASE + 19;// Invalid attempt to truncate file to 0 bytes
//--------------------------------------------------------------------------
// Block cache flush error
//--------------------------------------------------------------------------
const int ERR_BLKCACHE_FLUSH_LIST = ERR_BLKCACHEBASE + 1; // Error flushing list of blocks to PrimProc
//--------------------------------------------------------------------------
// Auto-increment error
//--------------------------------------------------------------------------
const int ERR_AUTOINC_GEN_EXCEED_MAX = ERR_AUTOINCBASE + 1; // Generated autoinc value exceeds max auto increment value
const int ERR_AUTOINC_USER_OUT_OF_RANGE = ERR_AUTOINCBASE + 2; // User specified autoinc value is out of range
const int ERR_AUTOINC_TABLE_NAME = ERR_AUTOINCBASE + 3; // Invalid schema/tablename for auto increment
const int ERR_AUTOINC_INIT1 = ERR_AUTOINCBASE + 4; // Error initializing auto increment (known exception)
const int ERR_AUTOINC_INIT2 = ERR_AUTOINCBASE + 5; // Error initializing auto increment (unknown exception)
const int ERR_AUTOINC_RID = ERR_AUTOINCBASE + 6; // Error initializing auto increment (unknown exception)
const int ERR_AUTOINC_START_SEQ = ERR_AUTOINCBASE + 7; // Error setting up an auto-increment sequence
const int ERR_AUTOINC_GET_RANGE = ERR_AUTOINCBASE + 8; // Error reserving an auto-increment range
const int ERR_AUTOINC_GET_LOCK = ERR_AUTOINCBASE + 9; // Error getting a lock to update auto-inc next value
const int ERR_AUTOINC_REL_LOCK = ERR_AUTOINCBASE + 10; // Error releasing lock to update auto-inc next value
const int ERR_AUTOINC_UPDATE = ERR_AUTOINCBASE + 11; // Error updating nextValue in system catalog
//--------------------------------------------------------------------------
// Bulk backup metadata file and corresponding HWM compressed chunk files
//--------------------------------------------------------------------------
const int ERR_METADATABKUP_FILE_RENAME = ERR_METABKUPBASE + 1; // Error renaming meta file
const int ERR_METADATABKUP_COMP_PARSE_HDRS = ERR_METABKUPBASE + 2; // Error parsing compression headers
const int ERR_METADATABKUP_COMP_VERIFY_HDRS = ERR_METABKUPBASE + 3; // Error verifying compression headers
const int ERR_METADATABKUP_COMP_CHUNK_NOT_FOUND= ERR_METABKUPBASE + 4; // Chunk not found in file
const int ERR_METADATABKUP_COMP_OPEN_BULK_BKUP = ERR_METABKUPBASE + 5; // Error opening backup chunk file
const int ERR_METADATABKUP_COMP_WRITE_BULK_BKUP= ERR_METABKUPBASE + 6; // Error writing to backup chunk file
const int ERR_METADATABKUP_COMP_READ_BULK_BKUP = ERR_METABKUPBASE + 7; // Error reading from backup chunk file
const int ERR_METADATABKUP_COMP_RENAME = ERR_METABKUPBASE + 8; // Error renaming chunk file
//--------------------------------------------------------------------------
// Block cache flush error
//--------------------------------------------------------------------------
const int ERR_BLKCACHE_FLUSH_LIST = ERR_BLKCACHEBASE + 1; // Error flushing list of blocks to PrimProc
//--------------------------------------------------------------------------
// Bulk backup metadata file and corresponding HWM compressed chunk files
//--------------------------------------------------------------------------
const int ERR_METADATABKUP_FILE_RENAME = ERR_METABKUPBASE + 1; // Error renaming meta file
const int ERR_METADATABKUP_COMP_PARSE_HDRS = ERR_METABKUPBASE + 2; // Error parsing compression headers
const int ERR_METADATABKUP_COMP_VERIFY_HDRS = ERR_METABKUPBASE + 3; // Error verifying compression headers
const int ERR_METADATABKUP_COMP_CHUNK_NOT_FOUND = ERR_METABKUPBASE + 4; // Chunk not found in file
const int ERR_METADATABKUP_COMP_OPEN_BULK_BKUP = ERR_METABKUPBASE + 5; // Error opening backup chunk file
const int ERR_METADATABKUP_COMP_WRITE_BULK_BKUP = ERR_METABKUPBASE + 6; // Error writing to backup chunk file
const int ERR_METADATABKUP_COMP_READ_BULK_BKUP = ERR_METABKUPBASE + 7; // Error reading from backup chunk file
const int ERR_METADATABKUP_COMP_RENAME = ERR_METABKUPBASE + 8; // Error renaming chunk file
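
The constants above are plain ints built from a per-subsystem base (ERR_BRMBASE, ERR_COMPBASE, ERR_METABKUPBASE, and so on, defined earlier in this header) plus a small offset, so calling code can compare a return code directly against the named values. A minimal sketch follows, assuming the surrounding WriteEngine namespace; the logging helper itself is illustrative and not part of this header or this diff.

#include <iostream>

namespace WriteEngine
{
// Illustrative helper only: report two of the error codes defined above and
// pass the return code through unchanged.
inline int logWriteEngineRc(int rc)
{
    switch (rc)
    {
        case ERR_BRM_VB_OVERFLOW:              // version buffer overflow
            std::cerr << "BRM version buffer overflow" << std::endl;
            break;

        case ERR_COMP_UNAVAIL_TYPE:            // unavailable compression type
            std::cerr << "unavailable compression type" << std::endl;
            break;

        default:                               // any other code, including success
            break;
    }

    return rc;
}
} // namespace WriteEngine
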
//------------------------------------------------------------------------------
// Class used to convert an error code to a corresponding error message string

File diff suppressed because it is too large
@ -60,409 +60,409 @@ namespace WriteEngine
class FileOp : public BlockOp
{
public:
/**
* @brief Constructor
*/
EXPORT explicit FileOp(bool doAlloc=true);
/**
* @brief Constructor
*/
EXPORT explicit FileOp(bool doAlloc = true);
/**
* @brief Destructor
*/
/**
* @brief Destructor
*/
EXPORT virtual ~FileOp();
/**
* @brief Close a file
*/
/**
* @brief Close a file
*/
EXPORT void closeFile( IDBDataFile* pFile ) const;
/**
* @brief Create a directory
*/
/**
* @brief Create a directory
*/
EXPORT int createDir( const char* dirName, mode_t mode ) const;
int createDir( const char* dirName ) const;
/**
* @brief Create a file with a fixed file size and file id
*/
/**
* @brief Create a file with a fixed file size and file id
*/
EXPORT int createFile( FID fid,
int & allocSize,
uint16_t dbRoot, uint32_t partition,
execplan::CalpontSystemCatalog::ColDataType colDataType,
uint64_t emptyVal = 0, int width = 1 ) ;
int& allocSize,
uint16_t dbRoot, uint32_t partition,
execplan::CalpontSystemCatalog::ColDataType colDataType,
uint64_t emptyVal = 0, int width = 1 ) ;
/**
* @brief Delete a file
*/
/**
* @brief Delete a file
*/
EXPORT int deleteFile( const char* fileName ) const;
/**
* @brief Delete the db files corresponding to the specified file id
*/
/**
* @brief Delete the db files corresponding to the specified file id
*/
EXPORT int deleteFile( FID fid ) const;
/**
* @brief Delete the db files corresponding to the specified file id
*/
/**
* @brief Delete the db files corresponding to the specified file id
*/
EXPORT int deleteFiles( const std::vector<int32_t>& fids ) const;
/**
* @brief Delete db files corresponding to specified file id and partition
*/
EXPORT int deletePartitions( const std::vector<OID>& fids,
const std::vector<BRM::PartitionInfo>& partitions )
const;
/**
* @brief Delete a specific database segment file.
*/
EXPORT int deleteFile( FID fid, uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const;
/**
* @brief Check whether a file exists or not
*/
/**
* @brief Delete db files corresponding to specified file id and partition
*/
EXPORT int deletePartitions( const std::vector<OID>& fids,
const std::vector<BRM::PartitionInfo>& partitions )
const;
/**
* @brief Delete a specific database segment file.
*/
EXPORT int deleteFile( FID fid, uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const;
/**
* @brief Check whether a file exists or not
*/
EXPORT bool exists( const char* fileName ) const;
/**
* @brief Check whether file exists or not by using file id, DBRoot,
* partition, and segment number.
*/
/**
* @brief Check whether file exists or not by using file id, DBRoot,
* partition, and segment number.
*/
EXPORT bool exists( FID fid, uint16_t dbRoot,
uint32_t partition, uint16_t segment ) const;
uint32_t partition, uint16_t segment ) const;
/**
* @brief Check whether a column exists or not by using file id. Since this
* is not enough to fully qualify a db filename, all it can do is to verify
* that the OID directory exists on one or more of the DBRoots.
*/
/**
* @brief Check whether a column exists or not by using file id. Since this
* is not enough to fully qualify a db filename, all it can do is to verify
* that the OID directory exists on one or more of the DBRoots.
*/
EXPORT bool existsOIDDir( FID fid ) const;
/**
* @brief Expand current abbreviated extent for this column to a full extent
*
* @param pFile FILE ptr of segment file we are updating.
* @param dbRoot DBRoot of the file being updated.
* @param emptyVal Empty value used in initializing extents for this column
* @param width Width of this column (in bytes)
*/
/**
* @brief Expand current abbreviated extent for this column to a full extent
*
* @param pFile FILE ptr of segment file we are updating.
* @param dbRoot DBRoot of the file being updated.
* @param emptyVal Empty value used in initializing extents for this column
* @param width Width of this column (in bytes)
*/
EXPORT virtual int expandAbbrevColumnExtent(
IDBDataFile* pFile,
uint16_t dbRoot,
uint64_t emptyVal,
int width );
IDBDataFile* pFile,
uint16_t dbRoot,
uint64_t emptyVal,
int width );
/**
* @brief Add an extent to the specified Column OID and DBRoot.
* The extent must already exist in the extentmap prior to calling this fctn.
*
* The partition, segment, and HWM of the column file where the
* extent is added is returned. If needed, the applicable column segment
* file will be created. This extendFile1 function should supplant other
* extendFile functions with Multiple-File-per-OID enhancement, "but" we
* may want to rethink when we do Shared-Nothing. When this function
* returns, the file position will be located at the end of the file.
* For shared-everything DBRoot was an output argument, as BRM selected the
* DBRoot. For shared-nothing DBRoot is an input argument, as the
* application code must track/control the DBRoot selection.
* If this is the very first file for the specified DBRoot, then the
* partition and segment number must be specified, else the selected
* partition and segment numbers are returned.
*
* @param oid OID of the column to be extended
* @param emptyVal Empty value to be used for oid
* @param width Width of the column
* @param hwm The fbo of the column segment file where the new extent begins
* @param startLbid The starting LBID for the new extent
* @param allocSize Number of blocks allocated to the extent.
* @param dbRoot The DBRoot of the file with the new extent.
* @param partition The partnum of the file with the new extent.
* @param segment The segnum of the file with the new extent.
* @param segFile (out) Name of the segment file where extent was added.
* @param pFile (out) FILE ptr to the file where the extent is added.
* @param newFile (out) Indicates if a new file was created for the extent
* @param hdrs (in/out) Contents of headers, if file is compressed.
* @return returns NO_ERROR if success.
*/
/**
* @brief Add an extent to the specified Column OID and DBRoot.
* The extent must already exist in the extentmap prior to calling this fctn.
*
* The partition, segment, and HWM of the column file where the
* extent is added is returned. If needed, the applicable column segment
* file will be created. This extendFile1 function should supplant other
* extendFile functions with Multiple-File-per-OID enhancement, "but" we
* may want to rethink when we do Shared-Nothing. When this function
* returns, the file position will be located at the end of the file.
* For shared-everything DBRoot was an output argument, as BRM selected the
* DBRoot. For shared-nothing DBRoot is an input argument, as the
* application code must track/control the DBRoot selection.
* If this is the very first file for the specified DBRoot, then the
* partition and segment number must be specified, else the selected
* partition and segment numbers are returned.
*
* @param oid OID of the column to be extended
* @param emptyVal Empty value to be used for oid
* @param width Width of the column
* @param hwm The fbo of the column segment file where the new extent begins
* @param startLbid The starting LBID for the new extent
* @param allocSize Number of blocks allocated to the extent.
* @param dbRoot The DBRoot of the file with the new extent.
* @param partition The partnum of the file with the new extent.
* @param segment The segnum of the file with the new extent.
* @param segFile (out) Name of the segment file where extent was added.
* @param pFile (out) FILE ptr to the file where the extent is added.
* @param newFile (out) Indicates if a new file was created for the extent
* @param hdrs (in/out) Contents of headers, if file is compressed.
* @return returns NO_ERROR if success.
*/
EXPORT int extendFile(OID oid, uint64_t emptyVal,
int width,
HWM hwm,
BRM::LBID_t startLbid,
int allocSize,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
std::string& segFile,
IDBDataFile*& pFile,
bool& newFile,
char* hdrs);
int width,
HWM hwm,
BRM::LBID_t startLbid,
int allocSize,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
std::string& segFile,
IDBDataFile*& pFile,
bool& newFile,
char* hdrs);
/**
* @brief For alter table add column; add an extent to a specific file
*
* @param oid OID of the column to be extended
* @param emptyVal Empty value to be used for oid
* @param width Width of the column
* @param allocSize (out) Number of blocks allocated to the extent.
* @param dbRoot The DBRoot of the file with the new extent.
* @param partition The partnum of the file with the new extent.
* @param segment The segnum of the file with the new extent.
* @param segFile (out) Name of the segment file where extent was added.
* @param startLbid (out) The starting LBID for the new extent
* @param newFile (out) Indicates if a new file was created for the extent
* @param hdrs (in/out) Contents of headers, if file is compressed.
*/
/**
* @brief For alter table add column; add an extent to a specific file
*
* @param oid OID of the column to be extended
* @param emptyVal Empty value to be used for oid
* @param width Width of the column
* @param allocSize (out) Number of blocks allocated to the extent.
* @param dbRoot The DBRoot of the file with the new extent.
* @param partition The partnum of the file with the new extent.
* @param segment The segnum of the file with the new extent.
* @param segFile (out) Name of the segment file where extent was added.
* @param startLbid (out) The starting LBID for the new extent
* @param newFile (out) Indicates if a new file was created for the extent
* @param hdrs (in/out) Contents of headers, if file is compressed.
*/
EXPORT int addExtentExactFile(OID oid, uint64_t emptyVal,
int width,
int& allocSize,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
execplan::CalpontSystemCatalog::ColDataType colDataType,
std::string& segFile,
BRM::LBID_t& startLbid,
bool& newFile,
char* hdrs);
int width,
int& allocSize,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
execplan::CalpontSystemCatalog::ColDataType colDataType,
std::string& segFile,
BRM::LBID_t& startLbid,
bool& newFile,
char* hdrs);
/**
* @brief Pad the specified compressed extent with empty chunks
* @param oid OID of relevant column
* @param width Width in bytes of this column
* @param emptyVal Empty value to be employed in filling the chunks
* @param dbRoot DBRoot of the extent to be filled
* @param partition Partition of the extent to be filled
* @param segment Segment file number of the extent to be filled
* @param hwm New HWM blk setting for the segment file after extent is padded
* @param segFile (out) Name of updated segment file
* @param errTask (out) Task that failed if error occurs
* @return returns NO_ERROR if success.
*/
/**
* @brief Pad the specified compressed extent with empty chunks
* @param oid OID of relevant column
* @param width Width in bytes of this column
* @param emptyVal Empty value to be employed in filling the chunks
* @param dbRoot DBRoot of the extent to be filled
* @param partition Partition of the extent to be filled
* @param segment Segment file number of the extent to be filled
* @param hwm New HWM blk setting for the segment file after extent is padded
* @param segFile (out) Name of updated segment file
* @param errTask (out) Task that failed if error occurs
* @return returns NO_ERROR if success.
*/
EXPORT int fillCompColumnExtentEmptyChunks(OID oid,
int colWidth,
uint64_t emptyVal,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
HWM hwm,
std::string& segFile,
std::string& errTask);
int colWidth,
uint64_t emptyVal,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
HWM hwm,
std::string& segFile,
std::string& errTask);
/**
* @brief Write the specified header info to compressed column file pFile.
*
* @param pFile Column file to be written to
* @param hdr Header info to be written
*/
/**
* @brief Write the specified header info to compressed column file pFile.
*
* @param pFile Column file to be written to
* @param hdr Header info to be written
*/
EXPORT int writeHeaders(IDBDataFile* pFile, const char* hdr) const;
/**
* @brief Write the specified header info to compressed column or
* dictionary file pFile.
*
* @param pFile Column file to be written to
* @param controlHdr Control header info to be written
* @param pointerHdr Pointer header info to be written
* @param ptrHdrSize Size (in bytes) of pointerHdr
*/
/**
* @brief Write the specified header info to compressed column or
* dictionary file pFile.
*
* @param pFile Column file to be written to
* @param controlHdr Control header info to be written
* @param pointerHdr Pointer header info to be written
* @param ptrHdrSize Size (in bytes) of pointerHdr
*/
EXPORT int writeHeaders(IDBDataFile* pFile,
const char* controlHdr,
const char* pointerHdr,
uint64_t ptrHdrSize) const;
const char* controlHdr,
const char* pointerHdr,
uint64_t ptrHdrSize) const;
/**
* @brief Get the Version Buffer filename for the specified fid (OID).
*
* This version of getFileName automatically uses 0 for the partition and
* segment numbers. The applicable DBRoot is assigned based on the OID.
*
* @param fid (in) OID of the Version Buffer DB file of interest
* @param fileName (out) the name of the pertinent file that was found
*
* @return returns NO_ERROR if success; ERR_FILE_NOT_EXIST if file not found
*/
/**
* @brief Get the Version Buffer filename for the specified fid (OID).
*
* This version of getFileName automatically uses 0 for the partition and
* segment numbers. The applicable DBRoot is assigned based on the OID.
*
* @param fid (in) OID of the Version Buffer DB file of interest
* @param fileName (out) the name of the pertinent file that was found
*
* @return returns NO_ERROR if success; ERR_FILE_NOT_EXIST if file not found
*/
int getVBFileName( FID fid, char* fileName ) const;
/**
* @brief Get the filename for the specified fid (OID). DBRoot, partition,
* and segment number.
*
* @param fid (in) OID of the DB file of interest
* @param fileName (out) the name of the pertinent file that was found
* @param dbRoot (in) DBRoot of the file of interest. If 0, then all the
* DBRoots will be searched.
* @param partition (in) partition number of the file of interest
* @param segment (in) segment number of the file of interest
*/
/**
* @brief Get the filename for the specified fid (OID). DBRoot, partition,
* and segment number.
*
* @param fid (in) OID of the DB file of interest
* @param fileName (out) the name of the pertinent file that was found
* @param dbRoot (in) DBRoot of the file of interest. If 0, then all the
* DBRoots will be searched.
* @param partition (in) partition number of the file of interest
* @param segment (in) segment number of the file of interest
*/
int getFileName( FID fid, char* fileName,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const;
uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const;
/**
* @brief Construct directory path for the specified fid (OID), DBRoot, and
* partition number. Directory does not have to exist, nor is it created.
*/
int getDirName( FID fid, uint16_t dbRoot,
uint32_t partition,
std::string& dirName) const;
uint32_t partition,
std::string& dirName) const;
/**
* @brief Get the file size
*/
EXPORT int getFileSize( IDBDataFile* pFile, long long& fileSize ) const;
EXPORT int getFileSize( FID fid, uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
long long& fileSize ) const;
uint32_t partition,
uint16_t segment,
long long& fileSize ) const;
/**
* @brief Initialize an extent in a dictionary store file
* @param pFile (in) IDBDataFile* of dictionary store file to be written to
* @param dbRoot (in) - DBRoot of pFile
* @param nBlocks (in) - number of blocks to be written for an extent
* @param blockHdrInit(in) - data used to initialize each block header
* @param blockHdrInitSize(in) - number of bytes in blockHdrInit
* @param bExpandExtent (in) - Expand existing extent, or initialize new one
*/
/**
* @brief Initialize an extent in a dictionary store file
* @param pFile (in) IDBDataFile* of dictionary store file to be written to
* @param dbRoot (in) - DBRoot of pFile
* @param nBlocks (in) - number of blocks to be written for an extent
* @param blockHdrInit(in) - data used to initialize each block header
* @param blockHdrInitSize(in) - number of bytes in blockHdrInit
* @param bExpandExtent (in) - Expand existing extent, or initialize new one
*/
EXPORT int initDctnryExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize,
bool bExpandExtent );
uint16_t dbRoot,
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize,
bool bExpandExtent );
/**
* @brief Check whether it is a directory
*/
/**
* @brief Check whether it is a directory
*/
EXPORT bool isDir( const char* dirName ) const;
/**
* @brief See if there is room in the file system for specific number of blks
* @param fileName Name of file to extend (does not have to be full name)
* @param nBlocks Number of 8192-byte blocks to be added
* @return returns TRUE if file system has room for 'nBlocks', else FALSE
*/
/**
* @brief See if there is room in the file system for specific number of blks
* @param fileName Name of file to extend (does not have to be full name)
* @param nBlocks Number of 8192-byte blocks to be added
* @return returns TRUE if file system has room for 'nBlocks', else FALSE
*/
EXPORT bool isDiskSpaceAvail(const std::string& fileName,
int nBlocks) const;
int nBlocks) const;
/**
* @brief Convert an oid to a full file name
*/
/**
* @brief Convert an oid to a full file name
*/
EXPORT int oid2FileName( FID fid, char* fullFileName,
bool bCreateDir, uint16_t dbRoot,
uint32_t partition, uint16_t segment ) const;
bool bCreateDir, uint16_t dbRoot,
uint32_t partition, uint16_t segment ) const;
EXPORT int oid2DirName( FID fid, char* oidDirName ) const;
/**
* @brief Open a file using a filename.
* @param fileName Name of the file to open.
* @param mode Mode to use in opening the file (ex: "r+b").
* @param ioBuffSize Buffer size to be employed by setvbuf().
* @return returns the IDBDataFile* of the opened file.
*/
/**
* @brief Open a file using a filename.
* @param fileName Name of the file to open.
* @param mode Mode to use in opening the file (ex: "r+b").
* @param ioBuffSize Buffer size to be employed by setvbuf().
* @return returns the IDBDataFile* of the opened file.
*/
EXPORT IDBDataFile* openFile( const char* fileName,
const char* mode = "r+b",
int ioColSize = DEFAULT_COLSIZ,
bool useTmpSuffix = false) const;
const char* mode = "r+b",
int ioColSize = DEFAULT_COLSIZ,
bool useTmpSuffix = false) const;
/**
* @brief Open a file using an OID, dbroot, partition, and segment number.
* @param fid OID of the file to be opened.
* @param dbRoot DBRoot of the file to be opened.
* @param partition Partition number of the file to be opened.
* @param segment Segment number of the file to be opened.
* @param mode Mode to use in opening the file (default of "r+b" will open
* an existing binary file as read/write).
* @param ioBuffSize Buffer size to be employed by setvbuf().
* @return returns the IDBDataFile* of the opened file.
*/
/**
* @brief Open a file using an OID, dbroot, partition, and segment number.
* @param fid OID of the file to be opened.
* @param dbRoot DBRoot of the file to be opened.
* @param partition Partition number of the file to be opened.
* @param segment Segment number of the file to be opened.
* @param mode Mode to use in opening the file (default of "r+b" will open
* an existing binary file as read/write).
* @param ioBuffSize Buffer size to be employed by setvbuf().
* @return returns the IDBDataFile* of the opened file.
*/
EXPORT IDBDataFile* openFile( FID fid,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
std::string& segFile,
const char* mode = "r+b",
int ioColSize = DEFAULT_COLSIZ,
bool useTmpSuffix = false) const;
uint16_t dbRoot,
uint32_t partition,
uint16_t segment,
std::string& segFile,
const char* mode = "r+b",
int ioColSize = DEFAULT_COLSIZ,
bool useTmpSuffix = false) const;
/**
* @brief Read to a buffer from a file at current location
*/
/**
* @brief Read to a buffer from a file at current location
*/
EXPORT int readFile( IDBDataFile* pFile, unsigned char* readBuf,
int readSize ) const;
int readSize ) const;
/**
* @brief Reads in 2 compression header blocks from a column segment file.
* IDBDataFile* points to start of data when function returns.
* @param pFile (in) IDBDataFile* of column segment file to be read.
* @param hdrs (out) Contents of headers that are read.
*/
/**
* @brief Reads in 2 compression header blocks from a column segment file.
* IDBDataFile* points to start of data when function returns.
* @param pFile (in) IDBDataFile* of column segment file to be read.
* @param hdrs (out) Contents of headers that are read.
*/
EXPORT int readHeaders( IDBDataFile* pFile, char* hdrs ) const;
EXPORT int readHeaders( IDBDataFile* pFile, char* hdr1, char* hdr2 )const;
/**
* @brief Reinitialize a partial extent in a column segment file
* @param pFile (in) IDBDataFile* of column segment file to be written to
* @param startOffset (in) - file offset where blocks are to be written
* @param nBlocks (in) - number of blocks to be written to the extent
* @param emptyVal(in) - empty value to be used for column data values
* width (in) - width of the applicable column
*/
/**
* @brief Reinitialize a partial extent in a column segment file
* @param pFile (in) IDBDataFile* of column segment file to be written to
* @param startOffset (in) - file offset where blocks are to be written
* @param nBlocks (in) - number of blocks to be written to the extent
* @param emptyVal(in) - empty value to be used for column data values
* width (in) - width of the applicable column
*/
EXPORT int reInitPartialColumnExtent( IDBDataFile* pFile,
long long startOffset,
int nBlocks,
uint64_t emptyVal,
int width );
long long startOffset,
int nBlocks,
uint64_t emptyVal,
int width );
/**
* @brief Reinitialize an extent in a dictionary store file
* @param pFile (in) IDBDataFile* of dictionary store file to be written to
* @param startOffset (in) - file offset where blocks are to be written
* @param nBlocks (in) - number of blocks to be written to the extent
* @param blockHdrInit(in) - data used to initialize each block header
* @param blockHdrInitSize(in) - number of bytes in blockHdrInit
*/
/**
* @brief Reinitialize an extent in a dictionary store file
* @param pFile (in) IDBDataFile* of dictionary store file to be written to
* @param startOffset (in) - file offset where blocks are to be written
* @param nBlocks (in) - number of blocks to be written to the extent
* @param blockHdrInit(in) - data used to initialize each block header
* @param blockHdrInitSize(in) - number of bytes in blockHdrInit
*/
EXPORT int reInitPartialDctnryExtent( IDBDataFile* pFile,
long long startOffset,
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize );
long long startOffset,
int nBlocks,
unsigned char* blockHdrInit,
int blockHdrInitSize );
/**
* @brief Set the file to specified location based on the offset
*/
/**
* @brief Set the file to specified location based on the offset
*/
EXPORT int setFileOffset( IDBDataFile* pFile,
long long offset,
int origin = SEEK_SET ) const;
long long offset,
int origin = SEEK_SET ) const;
EXPORT int setFileOffsetBlock( IDBDataFile* pFile,
uint64_t lbid,
int origin = SEEK_SET ) const;
uint64_t lbid,
int origin = SEEK_SET ) const;
/**
* @brief Truncate the file to the specified file size
*/
/**
* @brief Truncate the file to the specified file size
*/
EXPORT int truncateFile( IDBDataFile* pFile,
long long fileSize ) const;
long long fileSize ) const;
/**
* @brief Write a buffer to a file at current location
*/
/**
* @brief Write a buffer to a file at current location
*/
EXPORT int writeFile( IDBDataFile* pFile,
const unsigned char* buf, int bufSize ) const;
const unsigned char* buf, int bufSize ) const;
/**
* @brief set the flag to use the instance to access the brm wrapper class
*/
/**
* @brief set the flag to use the instance to access the brm wrapper class
*/
EXPORT virtual void setTransId( const TxnID& transId);
EXPORT virtual void setBulkFlag(bool isBulkLoad);
EXPORT virtual void setFixFlag(bool isFix);
EXPORT virtual void setBulkFlag(bool isBulkLoad);
EXPORT virtual void setFixFlag(bool isFix);
TxnID getTransId() const;
void compressionType(int t);
int compressionType() const;
EXPORT virtual int flushFile(int rc, std::map<FID,FID> & oids);
EXPORT virtual int flushFile(int rc, std::map<FID, FID>& oids);
protected:
EXPORT virtual int updateColumnExtent(IDBDataFile* pFile, int nBlocks);
@ -471,25 +471,25 @@ protected:
int m_compressionType; // compression type
private:
//not copyable
//not copyable
FileOp(const FileOp& rhs);
FileOp& operator=(const FileOp& rhs);
int createFile( const char* fileName, int fileSize,
uint64_t emptyVal, int width,
uint16_t dbRoot );
int createFile( const char* fileName, int fileSize,
uint64_t emptyVal, int width,
uint16_t dbRoot );
int expandAbbrevColumnChunk( IDBDataFile* pFile,
uint64_t emptyVal,
int colWidth,
const compress::CompChunkPtr& chunkInPtr,
compress::CompChunkPtr& chunkOutPt);
uint64_t emptyVal,
int colWidth,
const compress::CompChunkPtr& chunkInPtr,
compress::CompChunkPtr& chunkOutPt);
int initAbbrevCompColumnExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width);
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width);
// Initialize an extent in a column segment file
// pFile (in) IDBDataFile* of column segment file to be written to
@ -501,33 +501,33 @@ private:
// bExpandExtent (in) - Expand existing extent, or initialize new one
// bAbbrevExtent (in) - If adding new extent, is it abbreviated
int initColumnExtent( IDBDataFile* pFile,
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width,
bool bNewFile,
bool bExpandExtent,
bool bAbbrevExtent );
uint16_t dbRoot,
int nBlocks,
uint64_t emptyVal,
int width,
bool bNewFile,
bool bExpandExtent,
bool bAbbrevExtent );
static void initDbRootExtentMutexes();
static void removeDbRootExtentMutexes();
int writeInitialCompColumnChunk( IDBDataFile* pFile,
int nBlocksAllocated,
int nRows,
uint64_t emptyVal,
int width,
char* hdrs);
int nBlocksAllocated,
int nRows,
uint64_t emptyVal,
int width,
char* hdrs);
TxnID m_transId;
bool m_isBulk;
bool m_isFix;
bool m_isBulk;
bool m_isFix;
// protect creation of m_DbRootAddExtentMutexes
static boost::mutex m_createDbRootMutexes;
// Mutexes used to serialize extent creation within each DBRoot
static std::map<int,boost::mutex*> m_DbRootAddExtentMutexes;
static std::map<int, boost::mutex*> m_DbRootAddExtentMutexes;
// protect race condition in creating directories
static boost::mutex m_mkdirMutex;
@ -553,19 +553,19 @@ inline int FileOp::createDir( const char* dirName ) const
return createDir( dirName, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH );
}
inline int FileOp::getVBFileName( FID fid, char* fileName ) const
inline int FileOp::getVBFileName( FID fid, char* fileName ) const
{
uint16_t dbRoot = 0;
uint32_t partition = 0;
uint16_t segment = 0;
return oid2FileName( fid, fileName, true, dbRoot, partition, segment );
return oid2FileName( fid, fileName, true, dbRoot, partition, segment );
}
inline int FileOp::getFileName( FID fid, char* fileName,
uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const
uint16_t dbRoot,
uint32_t partition,
uint16_t segment ) const
{
return oid2FileName( fid, fileName, false, dbRoot, partition, segment );
}
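
The inline helpers above close out the portion of the FileOp interface shown in this diff. As a hedged usage sketch only (not part of the change): it assumes a FileOp can be constructed as declared, that the OID/DBRoot/partition/segment values name an existing column segment file, and that 0 is the usual success (NO_ERROR) return value used elsewhere in WriteEngine.

#include <iostream>
#include <string>

using namespace WriteEngine;

// Open one column segment file via FileOp, print its size, and close it.
void printSegFileSize(FID oid, uint16_t dbRoot, uint32_t partition, uint16_t segment)
{
    FileOp fileOp;              // doAlloc defaults to true per the constructor above
    std::string segFile;        // receives the resolved segment file name

    // openFile() returns an IDBDataFile*; auto avoids spelling out its namespace here.
    auto* pFile = fileOp.openFile(oid, dbRoot, partition, segment, segFile);

    if (!pFile)
    {
        std::cerr << "could not open segment file for OID " << oid << std::endl;
        return;
    }

    long long fileSize = 0;

    if (fileOp.getFileSize(pFile, fileSize) == 0)   // 0 assumed to be NO_ERROR
        std::cout << segFile << " is " << fileSize << " bytes" << std::endl;

    fileOp.closeFile(pFile);
}
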

@ -34,318 +34,422 @@
namespace WriteEngine
{
/*****************************************************
* index definition
******************************************************/
const int IDX_BITTEST_SIZE = 10; /** @brief The bit size of bit test */
const int IDX_GROUP_SIZE = 3; /** @brief The bit size of group */
const int IDX_INSTRU_SIZE = 4; /** @brief The bit size of instruction */
const int IDX_PTR_SIZE = 46; /** @brief The bit size of address pointer */
const int IDX_TYPE_SIZE = 3; /** @brief The bit size of type */
/*****************************************************
* index definition
******************************************************/
const int IDX_BITTEST_SIZE = 10; /** @brief The bit size of bit test */
const int IDX_GROUP_SIZE = 3; /** @brief The bit size of group */
const int IDX_INSTRU_SIZE = 4; /** @brief The bit size of instruction */
const int IDX_PTR_SIZE = 46; /** @brief The bit size of address pointer */
const int IDX_TYPE_SIZE = 3; /** @brief The bit size of type */
const int IDX_BITMAP_SUBBLOCK_NO = 1; /** @brief Subblock 1 of root block is for bitmap pointer*/
const int IDX_MAX_TREE_LEVEL = 128; /** @brief The maximum depth of a tree */
const int IDX_MAX_MULTI_COL_BIT = 256; /** @brief The maximum bits of a multi-column tree (256 bit)*/
const int IDX_MAX_MULTI_COL_IDX_LEVEL = 52; /** @brief The maximum depth of a multi-column tree */
const int IDX_MAX_MULTI_COL_IDX_NUM = 64; /** @brief The maximum number of columns for a multi-column index */
const int MAX_IDX_RID = 1024; /** @brief Maximum index rids for one shot */
const int IDX_DEFAULT_READ_ROW = 10000; /** @brief Default number of rows for one read for index */
const int IDX_BITMAP_SUBBLOCK_NO = 1; /** @brief Subblock 1 of root block is for bitmap pointer*/
const int IDX_MAX_TREE_LEVEL = 128; /** @brief The maximum depth of a tree */
const int IDX_MAX_MULTI_COL_BIT = 256; /** @brief The maximum bits of a multi-column tree (256 bit)*/
const int IDX_MAX_MULTI_COL_IDX_LEVEL = 52; /** @brief The maximum depth of a multi-column tree */
const int IDX_MAX_MULTI_COL_IDX_NUM = 64; /** @brief The maximum number of columns for a multi-column index */
const int MAX_IDX_RID = 1024; /** @brief Maximum index rids for one shot */
const int IDX_DEFAULT_READ_ROW = 10000; /** @brief Default number of rows for one read for index */
// todo: need to move to a higher level shared file for dictionary
const int RID_SIZE = 46;
// todo: need to move to a higher level shared file for dictionary
const int RID_SIZE = 46;
// const int OID_SIZE = 24; /** @brief The bit size of object id */
const int FBO_SIZE = 36; /** @brief The bit size of file block offset */
const int SBID_SIZE = 5; /** @brief The bit size of sub block id */
const int ENTRY_SIZE = 5; /** @brief The bit size of entry location with a sub block */
const int LIST_SIZE_TYPE = 0;
const int LIST_RID_TYPE = 3;
const int LIST_NOT_USED_TYPE = 7;
const int LIST_HDR_SIZE = 32;
const int LIST_SUBBLOCK_TYPE = 4 ;
const int LIST_BLOCK_TYPE = 5 ;
const int LIST_LLP_TYPE = 6 ;
const int SUBBLOCK_TOTAL_BYTES = 256;
const int LIST_SUB_LLP_POS = 31;
const int LIST_LAST_LBID_POS = 30;
const int LIST_BLOCK_LLP_POS = 1023;
const int MAX_BLOCK_ENTRY = 1024;
const int MAX_SUB_RID_CNT = 30;
const int MAX_BLK_RID_CNT = 1023;
const int MAX_BLK_NARRAY_RID_CNT = 1018;
const int LBID_SBID_ENTRY = 46;
const int RID_COUNT_SIZE = 10;
const int CUR_BLK_POS_WIDTH = 2;
const int LLP_STATUS_WIDTH = 2;
const int LIST_ENTRY_WIDTH = 8;
const int LIST_BLK_LLP_ENTRY_WIDTH= 48;
const int BEGIN_LIST_BLK_LLP_POS = 1018;
const int NEXT_BLK_PTR_OFFSET = 5;
const int PARENT_PTR_OFFSET = 4;
const int TOTAL_NUM_ARRAY_PTR = 4;
const int ARRAY_LLP_EXIST = 1;
const int LLP_NOT_FULL = 0;
const int LLP_FULL = 1;
const int TOTAL_CUR_LEVEL = 10;
const int CUR_LEVEL_POS_WIDTH = 20;
const uint64_t INVALID_KEY = -1LL; /** @brief Invalid number */
const int FBO_SIZE = 36; /** @brief The bit size of file block offset */
const int SBID_SIZE = 5; /** @brief The bit size of sub block id */
const int ENTRY_SIZE = 5; /** @brief The bit size of entry location with a sub block */
/*****************************************************
* mask definition
******************************************************/
const int BIT_MASK_ARRAY[] = { 0x0,
0x01, /** @brief 1 bit mask */
0x03, /** @brief 2 bit mask */
0x07, /** @brief 3 bit mask */
0x0F, /** @brief 4 bit mask */
0x1F, /** @brief 5 bit mask */
0x3F /** @brief 6 bit mask */
};
const int LIST_SIZE_TYPE = 0;
const int LIST_RID_TYPE = 3;
const int LIST_NOT_USED_TYPE = 7;
const int LIST_HDR_SIZE = 32;
const int LIST_SUBBLOCK_TYPE = 4 ;
const int LIST_BLOCK_TYPE = 5 ;
const int LIST_LLP_TYPE = 6 ;
const int SUBBLOCK_TOTAL_BYTES = 256;
const int LIST_SUB_LLP_POS = 31;
const int LIST_LAST_LBID_POS = 30;
const int LIST_BLOCK_LLP_POS = 1023;
const int MAX_BLOCK_ENTRY = 1024;
const int MAX_SUB_RID_CNT = 30;
const int MAX_BLK_RID_CNT = 1023;
const int MAX_BLK_NARRAY_RID_CNT = 1018;
const int LBID_SBID_ENTRY = 46;
const int RID_COUNT_SIZE = 10;
const int CUR_BLK_POS_WIDTH = 2;
const int LLP_STATUS_WIDTH = 2;
const int LIST_ENTRY_WIDTH = 8;
const int LIST_BLK_LLP_ENTRY_WIDTH = 48;
const int BEGIN_LIST_BLK_LLP_POS = 1018;
const int NEXT_BLK_PTR_OFFSET = 5;
const int PARENT_PTR_OFFSET = 4;
const int TOTAL_NUM_ARRAY_PTR = 4;
const int ARRAY_LLP_EXIST = 1;
const int LLP_NOT_FULL = 0;
const int LLP_FULL = 1;
const int TOTAL_CUR_LEVEL = 10;
const int CUR_LEVEL_POS_WIDTH = 20;
const uint64_t INVALID_KEY = -1LL; /** @brief Invalid number */
/************************************************************************
* Type enumerations
************************************************************************/
enum IdxTreeEntryType { /** @brief Index tree entry types */
EMPTY_ENTRY = 0, /** @brief Empty entry */
UNIQUE_VAL = 7, /** @brief Unique value */
EMPTY_LIST = 1, /** @brief Empty list pointer entry */
EMPTY_PTR = 2, /** @brief Empty pointer entry */
BIT_TEST = 3, /** @brief Bit test entry */
LEAF_LIST = 4, /** @brief Leaf list pointer */
BITMAP_PTR = 5, /** @brief Bitmap pointer */
/*****************************************************
* mask definition
******************************************************/
const int BIT_MASK_ARRAY[] = { 0x0,
0x01, /** @brief 1 bit mask */
0x03, /** @brief 2 bit mask */
0x07, /** @brief 3 bit mask */
0x0F, /** @brief 4 bit mask */
0x1F, /** @brief 5 bit mask */
0x3F /** @brief 6 bit mask */
};
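
A small hedged illustration (not part of the diff) of how BIT_MASK_ARRAY is meant to be used: indexing by the number of bits wanted yields a mask for that many low-order bits. The raw values below are hypothetical.

// Keep only the low SBID_SIZE bits of a raw sub-block id and the low
// ENTRY_SIZE bits of a raw entry offset before packing them into an entry.
int rawSbid  = 37;
int rawEntry = 9;

int sbid  = rawSbid  & BIT_MASK_ARRAY[SBID_SIZE];   // 0x1F mask -> 5
int entry = rawEntry & BIT_MASK_ARRAY[ENTRY_SIZE];  // 0x1F mask -> 9
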
/************************************************************************
* Type enumerations
************************************************************************/
enum IdxTreeEntryType /** @brief Index tree entry types */
{
EMPTY_ENTRY = 0, /** @brief Empty entry */
UNIQUE_VAL = 7, /** @brief Unique value */
EMPTY_LIST = 1, /** @brief Empty list pointer entry */
EMPTY_PTR = 2, /** @brief Empty pointer entry */
BIT_TEST = 3, /** @brief Bit test entry */
LEAF_LIST = 4, /** @brief Leaf list pointer */
BITMAP_PTR = 5, /** @brief Bitmap pointer */
// SORT_LIST = 5, /** @brief Sorted list pointer */
MULTI_COL = 6 /** @brief Multi-column index pointer */
};
MULTI_COL = 6 /** @brief Multi-column index pointer */
};
enum IdxTreeGroupType { /** @brief Index tree group types */
ENTRY_1 = 0, /** @brief 1 entry per group */
ENTRY_2 = 1, /** @brief 2 entry per group */
ENTRY_4 = 2, /** @brief 4 entry per group */
ENTRY_8 = 3, /** @brief 8 entry per group */
ENTRY_16 = 4, /** @brief 16 entry per group */
ENTRY_32 = 5, /** @brief 32 entry per group */
ENTRY_BLK = 6 /** @brief 1k entry per group */
};
enum IdxTreeGroupType /** @brief Index tree group types */
{
ENTRY_1 = 0, /** @brief 1 entry per group */
ENTRY_2 = 1, /** @brief 2 entry per group */
ENTRY_4 = 2, /** @brief 4 entry per group */
ENTRY_8 = 3, /** @brief 8 entry per group */
ENTRY_16 = 4, /** @brief 16 entry per group */
ENTRY_32 = 5, /** @brief 32 entry per group */
ENTRY_BLK = 6 /** @brief 1k entry per group */
};
enum IdxBitCompareType { /** @brief Index bit compare types */
BIT_5 = 0, /** @brief 5-bit compare */
BIT_10 = 1 /** @brief 10-bit compare */
};
enum IdxBitCompareType /** @brief Index bit compare types */
{
BIT_5 = 0, /** @brief 5-bit compare */
BIT_10 = 1 /** @brief 10-bit compare */
};
enum IdxFreeMgrType { /** @brief Index free manager types */
TREE = 0, /** @brief Index tree type */
LIST = 1 /** @brief Index list type */
};
enum IdxFreeMgrType /** @brief Index free manager types */
{
TREE = 0, /** @brief Index tree type */
LIST = 1 /** @brief Index list type */
};
/************************************************************************
* @brief index definitions
************************************************************************/
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 12; /** @brief spare bits */
uint64_t group : IDX_GROUP_SIZE; /** @brief entry group type */
// The following is related to ptr
uint64_t fbo : FBO_SIZE; /** @brief file block offset */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxStartSubBlockEntry; /** @brief Index start block entry structure */
/************************************************************************
* @brief index definitions
************************************************************************/
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 12; /** @brief spare bits */
uint64_t group :
IDX_GROUP_SIZE; /** @brief entry group type */
// The following is related to ptr
uint64_t fbo :
FBO_SIZE; /** @brief file block offset */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxStartSubBlockEntry; /** @brief Index start block entry structure */
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 2; /** @brief spare bits */
uint64_t group : IDX_GROUP_SIZE; /** @brief entry group type */
// The following is related to ptr
uint64_t spare2 : 10; /** @brief spare bits */
uint64_t fbo : FBO_SIZE; /** @brief file block offset */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxEmptyListEntry; /** @brief Index empty list entry structure */
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 2; /** @brief spare bits */
uint64_t group :
IDX_GROUP_SIZE; /** @brief entry group type */
// The following is related to ptr
uint64_t spare2 : 10; /** @brief spare bits */
uint64_t fbo :
FBO_SIZE; /** @brief file block offset */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxEmptyListEntry; /** @brief Index empty list entry structure */
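
As a hedged sanity check, not in the original header: the bit widths declared for IdxEmptyListEntry add up to exactly one 64-bit word, which is what allows these entries to be read and written as single 8-byte values. The checks below assume a compiler that packs adjacent uint64_t bit-fields into one word, which is the common case.

static_assert(IDX_TYPE_SIZE + 2 + IDX_GROUP_SIZE + 10 +
              FBO_SIZE + SBID_SIZE + ENTRY_SIZE == 64,
              "IdxEmptyListEntry bit-fields must fill one 64-bit word");

static_assert(sizeof(IdxEmptyListEntry) == 8,
              "IdxEmptyListEntry expected to occupy a single 8-byte entry");
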
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo : FBO_SIZE; /** @brief file block offset */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxBitmapPointerEntry; /** @brief Index bitmap pointer entry structure */
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo :
FBO_SIZE; /** @brief file block offset */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxBitmapPointerEntry; /** @brief Index bitmap pointer entry structure */
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t bitTest : IDX_BITTEST_SIZE; /** @brief index bittest */
uint64_t group : IDX_GROUP_SIZE; /** @brief entry group type */
uint64_t bitCompare : 1;
uint64_t spare : 1; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo : FBO_SIZE; /** @brief file block offset */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxBitTestEntry; /** @brief Index bit test entry structure */
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t bitTest :
IDX_BITTEST_SIZE; /** @brief index bittest */
uint64_t group :
IDX_GROUP_SIZE; /** @brief entry group type */
uint64_t bitCompare : 1;
uint64_t spare : 1; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo :
FBO_SIZE; /** @brief file block offset */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxBitTestEntry; /** @brief Index bit test entry structure */
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo : FBO_SIZE; /** @brief file block offset */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxTreePointerEntry; /** @brief Index tree pointer entry structure */
/************************************************************************
* @brief index list node definitions
************************************************************************/
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type 3 */
uint64_t spare : 15; /** @brief spare bits */
RID rid : RID_SIZE; /** @brief row id */
} IdxRidListEntry; /** @brief Index rid list entry structure */
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 5;
uint64_t count : RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t llp : LBID_SBID_ENTRY; /** @brief size */
} IdxRidListPtr;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
// The following is related to ptr
uint64_t fbo :
FBO_SIZE; /** @brief file block offset */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxTreePointerEntry; /** @brief Index tree pointer entry structure */
/************************************************************************
* @brief index list node definitions
************************************************************************/
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type 3 */
uint64_t spare : 15; /** @brief spare bits */
RID rid :
RID_SIZE; /** @brief row id */
} IdxRidListEntry; /** @brief Index rid list entry structure */
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 5;
uint64_t count : RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t lbid : FBO_SIZE; /** @brief size */
uint64_t sbid : SBID_SIZE; /** @brief sub block id */
uint64_t entry : ENTRY_SIZE; /** @brief entry within sub block */
} IdxRidLastListPtr;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 5;
uint64_t count :
RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t llp :
LBID_SBID_ENTRY; /** @brief size */
} IdxRidListPtr;
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 13;
uint64_t llpStat : LLP_STATUS_WIDTH; /** llp status */
uint64_t childLbid : FBO_SIZE; /** @brief file block offset */
uint64_t spare2 : 10;
} IdxRidChildListPtr;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 5;
uint64_t count :
RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t lbid :
FBO_SIZE; /** @brief size */
uint64_t sbid :
SBID_SIZE; /** @brief sub block id */
uint64_t entry :
ENTRY_SIZE; /** @brief entry within sub block */
} IdxRidLastListPtr;
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type 0 or 6 */
uint64_t spare : 5;
uint64_t count : RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t nextLbid : FBO_SIZE; /** @brief file block offset */
uint64_t curLevel : TOTAL_CUR_LEVEL;
} IdxRidNextListPtr;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 13;
uint64_t llpStat :
LLP_STATUS_WIDTH; /** llp status */
uint64_t childLbid :
FBO_SIZE; /** @brief file block offset */
uint64_t spare2 : 10;
} IdxRidChildListPtr;
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type 6*/
uint64_t spare : 3; /** @brief spare bits */
uint64_t curLevelPos : CUR_LEVEL_POS_WIDTH;
uint64_t curBlkPos : CUR_BLK_POS_WIDTH; /** the position of current blk */
uint64_t parentLbid : FBO_SIZE; /** @brief file block offset */
} IdxRidParentListPtr;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type 0 or 6 */
uint64_t spare : 5;
uint64_t count :
RID_COUNT_SIZE; /** the count of rids on the current blk */
uint64_t nextLbid :
FBO_SIZE; /** @brief file block offset */
uint64_t curLevel :
TOTAL_CUR_LEVEL;
} IdxRidNextListPtr;
typedef struct {
IdxRidChildListPtr childIdxRidListPtr[4];
IdxRidParentListPtr parentIdxListPtr;
IdxRidNextListPtr nextIdxListPtr;
} IdxRidListArrayPtr;
/************************************************************************
* @brief index list header defintions
************************************************************************/
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
uint64_t size : RID_SIZE; /** @brief size */
} IdxRidListHdrSize;
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type 6*/
uint64_t spare : 3; /** @brief spare bits */
uint64_t curLevelPos :
CUR_LEVEL_POS_WIDTH;
uint64_t curBlkPos :
CUR_BLK_POS_WIDTH; /** the position of current blk */
uint64_t parentLbid :
FBO_SIZE; /** @brief file block offset */
} IdxRidParentListPtr;
typedef struct {
uint64_t type : IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
uint64_t llp : RID_SIZE; /** @brief size */
} IdxRidListHdrPtr;
typedef struct
{
IdxRidChildListPtr childIdxRidListPtr[4];
IdxRidParentListPtr parentIdxListPtr;
IdxRidNextListPtr nextIdxListPtr;
} IdxRidListArrayPtr;
typedef struct {
IdxRidListHdrSize idxRidListSize;
uint64_t key;
IdxRidListEntry firstIdxRidListEntry;
IdxRidListHdrPtr nextIdxRidListPtr;
} IdxRidListHdr;
/************************************************************************
* @brief index list header defintions
************************************************************************/
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
uint64_t size :
RID_SIZE; /** @brief size */
} IdxRidListHdrSize;
typedef struct {
uint64_t part1 : 15; /** @brief entry type */
uint64_t part2 : 15; /** @brief spare bits */
uint64_t spare : 34; /** @brief size */
} IdxRidListOffSet;
/************************************************************************
* @brief index tree node defintions
************************************************************************/
typedef struct {
IdxBitTestEntry next; /** @brief next in the node */
IdxBitTestEntry current; /** @brief current addr */
uint16_t level; /** @brief tree level */
uint16_t allocCount; /** @brief allocated entry cound from free mgr */
uint16_t useCount; /** @brief actual use entry count */
uint16_t offset; /** @brief entry offset */
bool used; /** @brief used flag */
} IdxTreeNode; /** @brief Index tree node */
typedef struct
{
uint64_t type :
IDX_TYPE_SIZE; /** @brief entry type */
uint64_t spare : 15; /** @brief spare bits */
uint64_t llp :
RID_SIZE; /** @brief size */
} IdxRidListHdrPtr;
typedef struct {
IdxTreeNode node[IDX_MAX_TREE_LEVEL]; /** @brief node array */
uint16_t maxLevel; /** @brief max level */
RID rid; /** @brief current row id */
uint64_t key; /** @brief current key */
uint16_t width; /** @brief current width */
} IdxTree; /** @brief Index tree */
typedef struct
{
IdxRidListHdrSize idxRidListSize;
uint64_t key;
IdxRidListEntry firstIdxRidListEntry;
IdxRidListHdrPtr nextIdxRidListPtr;
} IdxRidListHdr;
struct IdxTreeCacheNode {
RID rid; /** @brief RID */
uint64_t key; /** @brief Key */
IdxEmptyListEntry entry; /** @brief List pointer */
bool used; /** @brief Used flag */
IdxTreeCacheNode() { used = false; }
};
typedef struct
{
uint64_t part1 : 15; /** @brief entry type */
uint64_t part2 : 15; /** @brief spare bits */
uint64_t spare : 34; /** @brief size */
} IdxRidListOffSet;
/************************************************************************
* @brief index tree node defintions
************************************************************************/
typedef struct
{
IdxBitTestEntry next; /** @brief next in the node */
IdxBitTestEntry current; /** @brief current addr */
uint16_t level; /** @brief tree level */
uint16_t allocCount; /** @brief allocated entry cound from free mgr */
uint16_t useCount; /** @brief actual use entry count */
uint16_t offset; /** @brief entry offset */
bool used; /** @brief used flag */
} IdxTreeNode; /** @brief Index tree node */
struct IdxMultiColKey {
std::bitset<IDX_MAX_MULTI_COL_BIT> bitSet; /** @brief BitArray for all bits */
std::bitset<IDX_MAX_MULTI_COL_BIT> curBitset;/** @brief Current working column */
std::bitset<IDX_MAX_MULTI_COL_BIT> curMask; /** @brief Current bitset mask */
unsigned char keyBuf[IDX_MAX_MULTI_COL_BIT/8]; /** @brief Key buffer */
int curLevel; /** @brief Current index level */
int maxLevel; /** @brief Maximum index level */
int totalBit; /** @brief Total bits */
int testbitArray[IDX_MAX_MULTI_COL_IDX_LEVEL]; /** @brief Test bit array */
void clear() { bitSet.reset(); curBitset.reset(); curMask.reset();
curLevel = maxLevel = 0; totalBit = 0;
memset( testbitArray, 0, IDX_MAX_MULTI_COL_IDX_LEVEL); memset( keyBuf, 0, IDX_MAX_MULTI_COL_BIT/8 );
curMask = 0x1F; curMask = curMask << (IDX_MAX_MULTI_COL_BIT - 5);
}
IdxMultiColKey() { clear(); }
};
struct IdxMultiRid {
RID* ridArray; /** @brief RID array */
int totalRid; /** @brief Total number of row id */
IdxMultiRid() { totalRid = 0; ridArray = NULL; }
void setMultiRid( RID* rids, const int size ) {
totalRid = size;
ridArray = rids;
/* ridArray = new RID[size];
memcpy( ridArray, rids, size * sizeof( RID ) ); */
}
void clearMultiRid() { /*if( ridArray != NULL ) delete [] ridArray; ridArray = NULL;*/ } // we don't want to get into this mem business
};
typedef struct
{
IdxTreeNode node[IDX_MAX_TREE_LEVEL]; /** @brief node array */
uint16_t maxLevel; /** @brief max level */
RID rid; /** @brief current row id */
uint64_t key; /** @brief current key */
uint16_t width; /** @brief current width */
} IdxTree; /** @brief Index tree */
struct IdxLoadParam {
File sourceFile; /** @brief Source file contatin values */
struct IdxTreeCacheNode
{
RID rid; /** @brief RID */
uint64_t key; /** @brief Key */
IdxEmptyListEntry entry; /** @brief List pointer */
bool used; /** @brief Used flag */
IdxTreeCacheNode()
{
used = false;
}
};
OID indexTreeOid; /** @brief Target index tree oid */
OID indexListOid; /** @brief Target index list oid */
execplan::CalpontSystemCatalog::ColDataType indexColDataType; /** @brief Target index column type */
int indexWidth; /** @brief Target index width */
struct IdxMultiColKey
{
std::bitset<IDX_MAX_MULTI_COL_BIT> bitSet; /** @brief BitArray for all bits */
std::bitset<IDX_MAX_MULTI_COL_BIT> curBitset;/** @brief Current working column */
std::bitset<IDX_MAX_MULTI_COL_BIT> curMask; /** @brief Current bitset mask */
unsigned char keyBuf[IDX_MAX_MULTI_COL_BIT / 8]; /** @brief Key buffer */
int curLevel; /** @brief Current index level */
int maxLevel; /** @brief Maximum index level */
int totalBit; /** @brief Total bits */
int testbitArray[IDX_MAX_MULTI_COL_IDX_LEVEL]; /** @brief Test bit array */
void clear()
{
bitSet.reset();
curBitset.reset();
curMask.reset();
curLevel = maxLevel = 0;
totalBit = 0;
memset( testbitArray, 0, IDX_MAX_MULTI_COL_IDX_LEVEL);
memset( keyBuf, 0, IDX_MAX_MULTI_COL_BIT / 8 );
curMask = 0x1F;
curMask = curMask << (IDX_MAX_MULTI_COL_BIT - 5);
}
IdxMultiColKey()
{
clear();
}
};
struct IdxMultiRid
{
RID* ridArray; /** @brief RID array */
int totalRid; /** @brief Total number of row id */
IdxMultiRid()
{
totalRid = 0;
ridArray = NULL;
}
void setMultiRid( RID* rids, const int size )
{
totalRid = size;
ridArray = rids;
/* ridArray = new RID[size];
memcpy( ridArray, rids, size * sizeof( RID ) ); */
}
void clearMultiRid() { /*if( ridArray != NULL ) delete [] ridArray; ridArray = NULL;*/ } // we don't want to get into this mem business
};
int maxLoadRow; /** @brief Max rows for one load */
struct IdxLoadParam
{
File sourceFile; /** @brief Source file contatin values */
void setIdxLoadParam( const OID treeOid, const OID listOid, const execplan::CalpontSystemCatalog::ColDataType colDataType, const int width, const int maxRow )
{ indexTreeOid = treeOid; indexListOid = listOid; indexColDataType = colDataType;
indexWidth = width; maxLoadRow = maxRow; }
bool isValid() { return indexTreeOid && indexListOid && indexWidth && maxLoadRow; }
IdxLoadParam() { indexTreeOid = indexListOid = indexWidth = maxLoadRow = 0; }
};
OID indexTreeOid; /** @brief Target index tree oid */
OID indexListOid; /** @brief Target index list oid */
execplan::CalpontSystemCatalog::ColDataType indexColDataType; /** @brief Target index column type */
int indexWidth; /** @brief Target index width */
int maxLoadRow; /** @brief Max rows for one load */
void setIdxLoadParam( const OID treeOid, const OID listOid, const execplan::CalpontSystemCatalog::ColDataType colDataType, const int width, const int maxRow )
{
indexTreeOid = treeOid;
indexListOid = listOid;
indexColDataType = colDataType;
indexWidth = width;
maxLoadRow = maxRow;
}
bool isValid()
{
return indexTreeOid && indexListOid && indexWidth && maxLoadRow;
}
IdxLoadParam()
{
indexTreeOid = indexListOid = indexWidth = maxLoadRow = 0;
}
};
} //end of namespace
#endif // _WE_INDEX_H_
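The entries above are all packed 64-bit bitfield records; the width constants (IDX_TYPE_SIZE, FBO_SIZE, SBID_SIZE, ENTRY_SIZE, and so on) are defined elsewhere in the WriteEngine headers and do not appear in this excerpt. The following standalone sketch only illustrates the packing idea; the field widths are placeholders chosen so the fields total 64 bits, not the real WriteEngine values.

// Illustrative sketch only -- not part of we_index.h.
#include <cstdint>
#include <iostream>

struct DemoPointerEntry
{
    uint64_t type  : 3;   // entry type (placeholder width)
    uint64_t spare : 15;  // unused bits (placeholder width)
    uint64_t fbo   : 36;  // file block offset (placeholder width)
    uint64_t sbid  : 5;   // sub block id (placeholder width)
    uint64_t entry : 5;   // entry within sub block (placeholder width)
};

int main()
{
    DemoPointerEntry p = {};   // all fields zeroed
    p.fbo   = 12345;           // block 12345 within the segment file
    p.sbid  = 17;              // sub block 17 of that block
    p.entry = 9;               // entry 9 within that sub block

    std::cout << "fbo=" << p.fbo << " sbid=" << p.sbid
              << " entry=" << p.entry
              << " bytes=" << sizeof(DemoPointerEntry) << std::endl;
    return 0;
}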


@ -29,14 +29,14 @@
namespace WriteEngine
{
WriteEngine::WErrorCodes ec; // referenced as extern by chunkmanager
WriteEngine::WErrorCodes ec; // referenced as extern by chunkmanager
//------------------------------------------------------------------------------
// Constructor
//------------------------------------------------------------------------------
Log::Log() : m_bConsoleOutput( true ),
m_logFileName( "" ),
m_errlogFileName( "" )
m_logFileName( "" ),
m_errlogFileName( "" )
{
m_pid = ::getpid();
}
@ -76,19 +76,19 @@ void Log::formatMsg( const std::string& msg,
{
oss << " (" << m_pid << ":" <<
#ifdef _MSC_VER
GetCurrentThreadId()
GetCurrentThreadId()
#else
pthread_self()
pthread_self()
#endif
<< ") " <<
MSG_LEVEL_STR[level] << " : " << msg ;
<< ") " <<
MSG_LEVEL_STR[level] << " : " << msg ;
}
else
{
oss << " (" << m_pid << ") " << MSG_LEVEL_STR[level] << " : " << msg ;
}
if( code > 0 )
if ( code > 0 )
oss << " [" << code << "]";
}
@ -115,10 +115,11 @@ void Log::logMsg( const char* msg,
formatMsg( msg, level, oss, code );
// log error and critical msgs to syslog
if( level == MSGLVL_ERROR || level == MSGLVL_CRITICAL )
if ( level == MSGLVL_ERROR || level == MSGLVL_CRITICAL )
{
{ //log to log file and error log file within scope of mutex lock.
//logSyslog uses SimpleSyslog which has its own lock.
{
//log to log file and error log file within scope of mutex lock.
//logSyslog uses SimpleSyslog which has its own lock.
boost::mutex::scoped_lock lk(m_WriteLockMutex);
m_errLogFile << oss.str() << std::endl;
@ -136,14 +137,14 @@ void Log::logMsg( const char* msg,
// Format msg again without including the status code.
// Only log INFO2 msgs to console if m_bConsoleOutput is TRUE;
// All other msg levels always go to console.
if( (level != MSGLVL_INFO2) || (m_bConsoleOutput) )
if ( (level != MSGLVL_INFO2) || (m_bConsoleOutput) )
formatMsg ( msg, level, oss2 );
boost::mutex::scoped_lock lk(m_WriteLockMutex);
m_logFile << oss.str() << std::endl;
if( (level != MSGLVL_INFO2) || (m_bConsoleOutput) )
if ( (level != MSGLVL_INFO2) || (m_bConsoleOutput) )
std::cout << oss2.str() << std::endl;
}
}
@ -159,7 +160,7 @@ void Log::logMsg( const char* msg,
//------------------------------------------------------------------------------
void Log::setLogFileName( const char* logfile,
const char* errlogfile,
bool consoleFlag )
bool consoleFlag )
{
m_logFileName = logfile;
m_errlogFileName = errlogfile;
@ -204,11 +205,13 @@ void Log::logSyslog( const std::string& msg,
msgId = logging::M0076;
break;
}
case ERR_UNKNOWN:
{
msgId = logging::M0017;
case ERR_UNKNOWN:
{
msgId = logging::M0017;
break;
}
}
default:
{
msgId = logging::M0087;


@ -26,7 +26,7 @@
#include <time.h>
#include <sys/types.h>
#include <unistd.h>
#include <unistd.h>
#include <iostream>
#include <fstream>
@ -45,12 +45,14 @@
/** Namespace WriteEngine */
namespace WriteEngine
{
const std::string MSG_LEVEL_STR[] = {
"INFO",
"INFO",
"WARN",
"ERR ",
"CRIT" };
const std::string MSG_LEVEL_STR[] =
{
"INFO",
"INFO",
"WARN",
"ERR ",
"CRIT"
};
/** @brief Class is used to format and write log messages to cpimport.bin log
* file. When applicable, messages are also logged to syslog logs as well.
@ -58,33 +60,39 @@ const std::string MSG_LEVEL_STR[] = {
class Log : public WEObj
{
public:
/**
* @brief Constructor
*/
/**
* @brief Constructor
*/
EXPORT Log();
/**
* @brief Destructor
*/
/**
* @brief Destructor
*/
EXPORT ~Log();
/**
* @brief Log a cpimport.bin logfile message; logs errors to syslog as well
*/
/**
* @brief Log a cpimport.bin logfile message; logs errors to syslog as well
*/
EXPORT void logMsg( const char* msg, int code, MsgLevel level );
EXPORT void logMsg( const char* msg, MsgLevel level )
{ logMsg( msg, 0, level ); }
{
logMsg( msg, 0, level );
}
EXPORT void logMsg( const std::string& msg, MsgLevel level )
{ logMsg( msg.c_str(), level ); }
{
logMsg( msg.c_str(), level );
}
EXPORT void logMsg( const std::string& msg, int code, MsgLevel level )
{ logMsg( msg.c_str(), code, level ); }
{
logMsg( msg.c_str(), code, level );
}
/**
* @brief Set log file name
*/
/**
* @brief Set log file name
*/
EXPORT void setLogFileName( const char* logfile,
const char* errlogfile,
bool consoleFlag = true );
const char* errlogfile,
bool consoleFlag = true );
// BUG 5022
/**
@ -102,7 +110,7 @@ private:
int code = 0 ) const;
bool m_bConsoleOutput; // flag allowing INFO2 msg
// to display to console
// to display to console
std::string m_logFileName; // log file name
std::string m_errlogFileName; // error log file name
pid_t m_pid; // current pid
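A minimal usage sketch for the Log class declared above. The include path and log file locations are assumptions for illustration; the logMsg overloads and the MSGLVL_*/ERR_* values used here are the ones appearing in this commit.

// Illustrative sketch only.
#include <string>
#include "we_log.h"   // assumed header name for WriteEngine::Log

void demoLogUsage()
{
    using namespace WriteEngine;

    Log logger;

    // Route messages to a job log and an error log; echo INFO2 to the console.
    logger.setLogFileName( "/tmp/cpimport_demo.log",
                           "/tmp/cpimport_demo.err.log",
                           true );

    // INFO2 messages go to the log file (and console, since consoleFlag is true).
    logger.logMsg( std::string( "demo: starting work" ), MSGLVL_INFO2 );

    // Errors are also written to the error log file and forwarded to syslog.
    logger.logMsg( "demo: something went wrong", ERR_UNKNOWN, MSGLVL_ERROR );
}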


@ -30,50 +30,65 @@
/** Namespace WriteEngine */
namespace WriteEngine
{
class Log;
class Log;
/** Class WEObj */
class WEObj
{
public:
/**
* @brief Constructor
*/
WEObj() : m_debugLevel( DEBUG_0 ), m_log( 0 ) {}
/**
* @brief Constructor
*/
WEObj() : m_debugLevel( DEBUG_0 ), m_log( 0 ) {}
/**
* @brief Default Destructor
*/
~WEObj() {}
/**
* @brief Default Destructor
*/
~WEObj() {}
/**
* @brief Is it required to debug
*/
const bool isDebug( const DebugLevel level ) const { return level <= m_debugLevel; }
/**
* @brief Is it required to debug
*/
const bool isDebug( const DebugLevel level ) const
{
return level <= m_debugLevel;
}
/**
* @brief Get debug level
*/
const DebugLevel getDebugLevel() const { return m_debugLevel; }
/**
* @brief Get debug level
*/
const DebugLevel getDebugLevel() const
{
return m_debugLevel;
}
/**
* @brief Get Logger object
*/
Log* getLogger() const { return m_log; }
/**
* @brief Get Logger object
*/
Log* getLogger() const
{
return m_log;
}
/**
* @brief Set debug level
*/
void setDebugLevel( const DebugLevel level ) { m_debugLevel = level; }
/**
* @brief Set debug level
*/
void setDebugLevel( const DebugLevel level )
{
m_debugLevel = level;
}
/**
* @brief Set debug logger and debug level
*/
void setLogger( Log* logger ) { m_log = logger; }
/**
* @brief Set debug logger and debug level
*/
void setLogger( Log* logger )
{
m_log = logger;
}
private:
DebugLevel m_debugLevel; // internal use debug level
Log* m_log; // logger object for debug output
DebugLevel m_debugLevel; // internal use debug level
Log* m_log; // logger object for debug output
};
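A short sketch of how a component can use the WEObj base above to gate debug output. DemoComponent and the include names are hypothetical; the accessors are the ones declared in WEObj, and Log::logMsg is the overload shown earlier in this commit.

// Illustrative sketch only.
#include <string>
#include "we_obj.h"   // assumed header name for WriteEngine::WEObj
#include "we_log.h"   // assumed header name for WriteEngine::Log

class DemoComponent : public WriteEngine::WEObj
{
public:
    void doWork()
    {
        // Emit trace output only when level-1 debugging is enabled
        // and a logger has been attached.
        if ( isDebug( WriteEngine::DEBUG_1 ) && getLogger() )
            getLogger()->logMsg( std::string( "DemoComponent::doWork begin" ),
                                 WriteEngine::MSGLVL_INFO2 );
        // ... actual work ...
    }
};

void demoWEObj( WriteEngine::Log* log )
{
    DemoComponent c;
    c.setLogger( log );                       // share the job's logger
    c.setDebugLevel( WriteEngine::DEBUG_1 );  // enable level-1 tracing
    c.doWork();
}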


@ -43,36 +43,36 @@ using namespace idbdatafile;
namespace
{
const char* DATA_DIR_SUFFIX = "_data";
const char* TMP_FILE_SUFFIX = ".tmp";
const char* DATA_DIR_SUFFIX = "_data";
const char* TMP_FILE_SUFFIX = ".tmp";
const char* VERSION3_REC = "# VERSION: 3";
const int VERSION3_REC_LEN= 12;
const char* VERSION4_REC = "# VERSION: 4";
const int VERSION4_REC_LEN= 12;
const char* COLUMN1_REC = "COLUM1"; // HWM extent for a DBRoot
const int COLUMN1_REC_LEN = 6;
const char* COLUMN2_REC = "COLUM2"; // Placeholder for empty DBRoot
const int COLUMN2_REC_LEN = 6;
const char* DSTORE1_REC = "DSTOR1"; // HWM extent for a DBRoot
const int DSTORE1_REC_LEN = 6;
const char* DSTORE2_REC = "DSTOR2"; // Placeholder for empty DBRoot
const int DSTORE2_REC_LEN = 6;
const char* VERSION3_REC = "# VERSION: 3";
const int VERSION3_REC_LEN = 12;
const char* VERSION4_REC = "# VERSION: 4";
const int VERSION4_REC_LEN = 12;
const char* COLUMN1_REC = "COLUM1"; // HWM extent for a DBRoot
const int COLUMN1_REC_LEN = 6;
const char* COLUMN2_REC = "COLUM2"; // Placeholder for empty DBRoot
const int COLUMN2_REC_LEN = 6;
const char* DSTORE1_REC = "DSTOR1"; // HWM extent for a DBRoot
const int DSTORE1_REC_LEN = 6;
const char* DSTORE2_REC = "DSTOR2"; // Placeholder for empty DBRoot
const int DSTORE2_REC_LEN = 6;
//--------------------------------------------------------------------------
// Local Function that prints contents of an RBChunkInfo object
//--------------------------------------------------------------------------
std::ostream& operator<<(std::ostream& os,
const WriteEngine::RBChunkInfo& chk)
{
os << "OID-" << chk.fOid <<
"; DBRoot-" << chk.fDbRoot <<
"; Part-" << chk.fPartition <<
"; Seg-" << chk.fSegment <<
"; HWM-" << chk.fHwm;
//--------------------------------------------------------------------------
// Local Function that prints contents of an RBChunkInfo object
//--------------------------------------------------------------------------
std::ostream& operator<<(std::ostream& os,
const WriteEngine::RBChunkInfo& chk)
{
os << "OID-" << chk.fOid <<
"; DBRoot-" << chk.fDbRoot <<
"; Part-" << chk.fPartition <<
"; Seg-" << chk.fSegment <<
"; HWM-" << chk.fHwm;
return os;
}
return os;
}
}
namespace WriteEngine
@ -82,13 +82,15 @@ namespace WriteEngine
// Compare function used for set of RBChunkInfo objects.
//------------------------------------------------------------------------------
bool RBChunkInfoCompare::operator()
(const RBChunkInfo& lhs, const RBChunkInfo& rhs) const
(const RBChunkInfo& lhs, const RBChunkInfo& rhs) const
{
if (lhs.fOid < rhs.fOid) {
if (lhs.fOid < rhs.fOid)
{
return true;
}
if ((lhs.fOid==rhs.fOid) && (lhs.fSegment < rhs.fSegment)) {
if ((lhs.fOid == rhs.fOid) && (lhs.fSegment < rhs.fSegment))
{
return true;
}
@ -127,7 +129,7 @@ void RBMetaWriter::init (
// Delete any files that collide with the file names we are going to need.
// Construct the filenames; we will use a temporary file name until we are
// finished creating, at which time we will rename the temp files.
for (unsigned m=0; m<dbRoots.size(); m++)
for (unsigned m = 0; m < dbRoots.size(); m++)
{
std::string bulkRollbackPath( Config::getDBRootByNum( dbRoots[m] ) );
bulkRollbackPath += '/';
@ -145,7 +147,7 @@ void RBMetaWriter::init (
// Clear out any data subdirectory
deleteSubDir( metaFileName );
}
}
}
//------------------------------------------------------------------------------
// Saves snapshot of extentmap into a bulk rollback meta data file, for
@ -174,34 +176,36 @@ void RBMetaWriter::saveBulkRollbackMetaData(
Config::getRootIdList( dbRoots );
// Loop through DBRoot HWMs for this PM
for (unsigned m=0; m<dbRoots.size(); m++)
for (unsigned m = 0; m < dbRoots.size(); m++)
{
std::string metaFileName = openMetaFile( dbRoots[m] );
bOpenedFile = true;
fCreatedSubDir = false;
// Loop through the columns in the specified table
for( size_t i = 0; i < columns.size(); i++ )
for ( size_t i = 0; i < columns.size(); i++ )
{
const BRM::EmDbRootHWMInfo_v& dbRootHWMInfo =
dbRootHWMInfoVecCol[i];
// Select dbRootHWMInfo that matches DBRoot for this iteration
unsigned k = 0;
for (; k<dbRootHWMInfo.size(); k++)
for (; k < dbRootHWMInfo.size(); k++)
{
if (dbRoots[m] == dbRootHWMInfo[k].dbRoot)
break;
}
if (k >= dbRootHWMInfo.size()) // logic error; should not happen
{
std::ostringstream oss;
oss << "Error creating meta file; DBRoot" << dbRoots[m] <<
" listed in Calpont config file, but not in extentmap"
" for OID " << columns[i].dataFile.oid;
" listed in Calpont config file, but not in extentmap"
" for OID " << columns[i].dataFile.oid;
throw WeException( oss.str(), ERR_INVALID_PARAM );
}
uint16_t dbRoot = dbRootHWMInfo[k].dbRoot;
uint32_t partition = 0;
uint16_t segment = 0;
@ -211,7 +215,7 @@ void RBMetaWriter::saveBulkRollbackMetaData(
// For empty DBRoot (totalBlocks == 0),
// leave partition, segment, and HWM set to 0
if ((dbRootHWMInfo[k].totalBlocks > 0) ||
(dbRootHWMInfo[k].status == BRM::EXTENTOUTOFSERVICE))
(dbRootHWMInfo[k].status == BRM::EXTENTOUTOFSERVICE))
{
partition = dbRootHWMInfo[k].partitionNum;
segment = dbRootHWMInfo[k].segmentNum;
@ -234,7 +238,7 @@ void RBMetaWriter::saveBulkRollbackMetaData(
columns[i].compressionType );
// Save dctnry store meta-data info to support bulk rollback
if ( dctnryStoreOids[i] > 0 )
if ( dctnryStoreOids[i] > 0 )
{
std::vector<uint32_t> segList;
std::string segFileListErrMsg;
@ -244,7 +248,8 @@ void RBMetaWriter::saveBulkRollbackMetaData(
std::string dirName;
FileOp fileOp(false);
rc = fileOp.getDirName( dctnryStoreOids[i],
dbRoot, partition, dirName );
dbRoot, partition, dirName );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -259,8 +264,9 @@ void RBMetaWriter::saveBulkRollbackMetaData(
}
rc = BulkRollbackMgr::getSegFileList(dirName, false,
segList,
segFileListErrMsg);
segList,
segFileListErrMsg);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -277,7 +283,7 @@ void RBMetaWriter::saveBulkRollbackMetaData(
if (segList.size() == 0)
{
writeDictionaryStoreMetaNoDataMarker(
writeDictionaryStoreMetaNoDataMarker(
columns[i].dataFile.oid,
dctnryStoreOids[i],
dbRoot,
@ -288,7 +294,7 @@ void RBMetaWriter::saveBulkRollbackMetaData(
else
{
// Loop thru dictionary store seg files for this DBRoot
for (unsigned int kk=0; kk<segList.size(); kk++)
for (unsigned int kk = 0; kk < segList.size(); kk++)
{
unsigned int segDictionary = segList[kk];
@ -296,17 +302,18 @@ void RBMetaWriter::saveBulkRollbackMetaData(
HWM dictHWMStore;
int extState;
rc = BRMWrapper::getInstance()->getLocalHWM(
dctnryStoreOids[i],
partition,
segDictionary,
dictHWMStore,
extState );
dctnryStoreOids[i],
partition,
segDictionary,
dictHWMStore,
extState );
if (rc != NO_ERROR)
{
WErrorCodes ec;
std::ostringstream oss;
oss << "Error getting rollback HWM for "
"dictionary file "<< dctnryStoreOids[i] <<
"dictionary file " << dctnryStoreOids[i] <<
"; partition-" << partition <<
"; segment-" << segDictionary <<
"; " << ec.errorString(rc);
@ -329,9 +336,9 @@ void RBMetaWriter::saveBulkRollbackMetaData(
// For a compressed column, backup the starting HWM chunk if the
// starting HWM block is not on an empty DBRoot (or outOfSrvc)
if ( (columns[i].compressionType) &&
(columns[i].dataFile.fDbRoot == dbRootHWMInfo[k].dbRoot) &&
(dbRootHWMInfo[k].totalBlocks > 0) &&
(dbRootHWMInfo[k].status != BRM::EXTENTOUTOFSERVICE) )
(columns[i].dataFile.fDbRoot == dbRootHWMInfo[k].dbRoot) &&
(dbRootHWMInfo[k].totalBlocks > 0) &&
(dbRootHWMInfo[k].status != BRM::EXTENTOUTOFSERVICE) )
{
backupColumnHWMChunk(
columns[i].dataFile.oid,
@ -352,22 +359,25 @@ void RBMetaWriter::saveBulkRollbackMetaData(
size_t s = data.size(); // buffer size
size_t w = 0; // total bytes written so far
ssize_t n = 0; // bytes written in one write
for (int i = 0; i < 10 && w < s; i++)
{
n = fMetaDataFile->write(p+w, s-w);
n = fMetaDataFile->write(p + w, s - w);
if (n < 0)
break;
break;
w += n;
}
if (w != s)
{
int errRc = errno;
std::ostringstream oss;
oss << "Error writing bulk rollback meta-data file "
<< metaFileName << "; written/expect:" << w << "/" << s
<< "; err-" << errRc << "; " << strerror( errRc );
throw WeException(oss.str(), ERR_FILE_WRITE);
<< metaFileName << "; written/expect:" << w << "/" << s
<< "; err-" << errRc << "; " << strerror( errRc );
throw WeException(oss.str(), ERR_FILE_WRITE);
}
fMetaDataStream.str("");
@ -393,6 +403,7 @@ void RBMetaWriter::saveBulkRollbackMetaData(
catch (...)
{
}
throw WeException( ex.what(), ex.errorCode() );
}
}
@ -406,9 +417,9 @@ std::string RBMetaWriter::openMetaFile ( uint16_t dbRoot )
bulkRollbackPath += '/';
bulkRollbackPath += DBROOT_BULK_ROLLBACK_SUBDIR;
if( !IDBPolicy::exists( bulkRollbackPath.c_str() ) )
if ( !IDBPolicy::exists( bulkRollbackPath.c_str() ) )
{
if( IDBPolicy::mkdir( bulkRollbackPath.c_str() ) != 0 )
if ( IDBPolicy::mkdir( bulkRollbackPath.c_str() ) != 0 )
{
std::ostringstream oss;
oss << "Error creating bulk rollback directory " <<
@ -422,12 +433,12 @@ std::string RBMetaWriter::openMetaFile ( uint16_t dbRoot )
oss << "/" << fTableOID;
std::string metaFileName( bulkRollbackPath );
metaFileName += oss.str();
fMetaFileNames.insert( make_pair(dbRoot,metaFileName) );
fMetaFileNames.insert( make_pair(dbRoot, metaFileName) );
std::string tmpMetaFileName( metaFileName );
tmpMetaFileName += TMP_FILE_SUFFIX;
fMetaDataFile = IDBDataFile::open(IDBPolicy::getType(tmpMetaFileName.c_str(),
IDBPolicy::WRITEENG),
IDBPolicy::WRITEENG),
tmpMetaFileName.c_str(), "wb", 0);
if ( !fMetaDataFile )
@ -442,20 +453,20 @@ std::string RBMetaWriter::openMetaFile ( uint16_t dbRoot )
}
fMetaDataStream <<
"# VERSION: 4" << std::endl <<
"# APPLICATION: " << fAppDesc<<std::endl<<
"# PID: " << ::getpid() << std::endl <<
"# TABLE: " << fTableName << std::endl <<
"# COLUM1: coloid,"
"dbroot,part,seg,lastLocalHWM,type,typename,width,comp" <<
std::endl <<
"# COLUM2: coloid,"
"dbroot,part,seg,type,typename,width,comp" <<
std::endl <<
"# DSTOR1: coloid,dctoid,"
"dbroot,part,seg,localHWM,comp" << std::endl <<
"# DSTOR2: coloid,dctoid,"
"dbroot,part,seg,comp" << std::endl;
"# VERSION: 4" << std::endl <<
"# APPLICATION: " << fAppDesc << std::endl <<
"# PID: " << ::getpid() << std::endl <<
"# TABLE: " << fTableName << std::endl <<
"# COLUM1: coloid,"
"dbroot,part,seg,lastLocalHWM,type,typename,width,comp" <<
std::endl <<
"# COLUM2: coloid,"
"dbroot,part,seg,type,typename,width,comp" <<
std::endl <<
"# DSTOR1: coloid,dctoid,"
"dbroot,part,seg,localHWM,comp" << std::endl <<
"# DSTOR2: coloid,dctoid,"
"dbroot,part,seg,comp" << std::endl;
// Clear out any data subdirectory
// This is redundant because init() also calls deleteSubDir(), but it can't
@ -484,10 +495,11 @@ void RBMetaWriter::closeMetaFile ( )
//------------------------------------------------------------------------------
void RBMetaWriter::renameMetaFile ( )
{
for(std::map<uint16_t,std::string>::const_iterator iter =
fMetaFileNames.begin(); iter != fMetaFileNames.end(); ++iter)
for (std::map<uint16_t, std::string>::const_iterator iter =
fMetaFileNames.begin(); iter != fMetaFileNames.end(); ++iter)
{
const std::string& metaFileName = iter->second;
if (!metaFileName.empty())
{
std::string tmpMetaFileName = metaFileName;
@ -518,10 +530,11 @@ void RBMetaWriter::renameMetaFile ( )
//------------------------------------------------------------------------------
void RBMetaWriter::deleteFile ( )
{
for(std::map<uint16_t,std::string>::const_iterator iter =
fMetaFileNames.begin(); iter != fMetaFileNames.end(); ++iter)
for (std::map<uint16_t, std::string>::const_iterator iter =
fMetaFileNames.begin(); iter != fMetaFileNames.end(); ++iter)
{
const std::string& metaFileName = iter->second;
if (!metaFileName.empty())
{
std::string tmpMetaFileName = metaFileName;
@ -556,28 +569,30 @@ void RBMetaWriter::writeColumnMetaData (
if (withHWM)
{
fMetaDataStream << "COLUM1: " <<
columnOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
lastLocalHwm << ' ' <<
colType << ' ' <<
colTypeName << ' ' <<
colWidth;
columnOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
lastLocalHwm << ' ' <<
colType << ' ' <<
colTypeName << ' ' <<
colWidth;
}
else
{
fMetaDataStream << "COLUM2: " <<
columnOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
colType << ' ' <<
colTypeName << ' ' <<
colWidth;
columnOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
colType << ' ' <<
colTypeName << ' ' <<
colWidth;
}
if (compressionType)
fMetaDataStream << ' ' << compressionType << ' ';
fMetaDataStream << std::endl;
// If column is compressed, then create directory for storing HWM chunks
@ -606,14 +621,16 @@ void RBMetaWriter::writeDictionaryStoreMetaData (
int compressionType )
{
fMetaDataStream << "DSTOR1: " <<
columnOID << ' ' <<
dictionaryStoreOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
localHwm;
columnOID << ' ' <<
dictionaryStoreOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment << ' ' <<
localHwm;
if (compressionType)
fMetaDataStream << ' ' << compressionType << ' ';
fMetaDataStream << std::endl;
// Save dictionary meta data for later use in backing up the HWM chunks
@ -640,13 +657,15 @@ void RBMetaWriter::writeDictionaryStoreMetaNoDataMarker (
int compressionType )
{
fMetaDataStream << "DSTOR2: " <<
columnOID << ' ' <<
dictionaryStoreOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment;
columnOID << ' ' <<
dictionaryStoreOID << ' ' <<
dbRoot << ' ' <<
partition << ' ' <<
segment;
if (compressionType)
fMetaDataStream << ' ' << compressionType << ' ';
fMetaDataStream << std::endl;
}
@ -658,7 +677,7 @@ void RBMetaWriter::createSubDir( const std::string& metaFileName )
std::string bulkRollbackSubPath( metaFileName );
bulkRollbackSubPath += DATA_DIR_SUFFIX;
if( IDBPolicy::mkdir( bulkRollbackSubPath.c_str() ) != 0 )
if ( IDBPolicy::mkdir( bulkRollbackSubPath.c_str() ) != 0 )
{
std::ostringstream oss;
oss << "Error creating bulk rollback data subdirectory " <<
@ -677,7 +696,7 @@ void RBMetaWriter::deleteSubDir( const std::string& metaFileName )
std::string bulkRollbackSubPath( metaFileName );
bulkRollbackSubPath += DATA_DIR_SUFFIX;
if( IDBPolicy::remove( bulkRollbackSubPath.c_str() ) != 0 )
if ( IDBPolicy::remove( bulkRollbackSubPath.c_str() ) != 0 )
{
std::ostringstream oss;
oss << "Error deleting bulk rollback data subdirectory " <<
@ -702,7 +721,7 @@ void RBMetaWriter::backupColumnHWMChunk(
if (!IDBPolicy::useHdfs())
{
backupHWMChunk( true,
columnOID, dbRoot, partition, segment, startingHWM );
columnOID, dbRoot, partition, segment, startingHWM );
}
}
@ -732,14 +751,18 @@ bool RBMetaWriter::backupDctnryHWMChunk(
{
RBChunkInfo chunkInfo(
dctnryOID, 0, partition, segment, 0);
RBChunkInfo chunkInfoFound(0,0,0,0,0);
RBChunkInfo chunkInfoFound(0, 0, 0, 0, 0);
bool bFound = false;
{ // Use scoped lock to perform "find"
{
// Use scoped lock to perform "find"
boost::mutex::scoped_lock lock( fRBChunkDctnryMutex );
if ( (fLog) && (fLog->isDebug(DEBUG_1)) )
printDctnryChunkList(chunkInfo, "when searching ");
RBChunkSet::iterator iter = fRBChunkDctnrySet.find ( chunkInfo );
if (iter != fRBChunkDctnrySet.end())
{
bFound = true;
@ -755,10 +778,11 @@ bool RBMetaWriter::backupDctnryHWMChunk(
// we use hdfs buffer file. Set backup flag
// so application knows to use tmp buffer file.
bBackupApplies = true;
if (!IDBPolicy::useHdfs())
{
backupHWMChunk(false, dctnryOID,
dbRoot, partition, segment, chunkInfoFound.fHwm);
dbRoot, partition, segment, chunkInfoFound.fHwm);
}
}
else
@ -773,9 +797,11 @@ bool RBMetaWriter::backupDctnryHWMChunk(
// by this RBChunkInfo object.
}
{ // Use scoped lock to perform "erase"
{
// Use scoped lock to perform "erase"
boost::mutex::scoped_lock lock( fRBChunkDctnryMutex );
fRBChunkDctnrySet.erase( chunkInfoFound );
if ( (fLog) && (fLog->isDebug(DEBUG_1)) )
printDctnryChunkList(chunkInfoFound, "after deleting ");
}
@ -806,6 +832,7 @@ void RBMetaWriter::backupHWMFile(
HWM startingHWM) // starting HWM for db segment file
{
std::string fileType("column");
if (!bColumnFile)
fileType = "dictionary";
@ -814,7 +841,8 @@ void RBMetaWriter::backupHWMFile(
// Construct file name for db file to be backed up
char dbFileName[FILE_NAME_SIZE];
int rc = fileOp.getFileName( columnOID, dbFileName,
dbRoot, partition, segment );
dbRoot, partition, segment );
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -824,13 +852,14 @@ void RBMetaWriter::backupHWMFile(
"; partition-" << partition <<
"; segment-" << segment;
throw WeException( oss.str(), rc );
}
}
// Construct file name for backup copy of db file
std::ostringstream ossFile;
ossFile << "/" << columnOID << ".p" << partition << ".s" << segment;
std::string backupFileName;
rc = getSubDirPath( dbRoot, backupFileName );
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -839,6 +868,7 @@ void RBMetaWriter::backupHWMFile(
"; Can't find matching meta file for DBRoot" << dbRoot;
throw WeException( oss.str(), rc );
}
backupFileName += ossFile.str();
std::string backupFileNameTmp = backupFileName;
@ -858,9 +888,9 @@ void RBMetaWriter::backupHWMFile(
// Copy the db file to a temporary name
IDBFileSystem& fs = IDBPolicy::getFs( backupFileNameTmp.c_str() );
if ( !fs.exists(dbFileName) )
if ( !fs.exists(dbFileName) )
{
std::ostringstream oss;
std::ostringstream oss;
oss << "Error creating backup " << fileType <<
" file for OID " << columnOID <<
"; dbfile does not exist for DBRoot" << dbRoot <<
@ -870,6 +900,7 @@ void RBMetaWriter::backupHWMFile(
}
rc = fs.copyFile( dbFileName, backupFileNameTmp.c_str() );
if (rc != 0)
{
std::ostringstream oss;
@ -886,6 +917,7 @@ void RBMetaWriter::backupHWMFile(
// Rename temporary named backup file to final name
rc = fs.rename( backupFileNameTmp.c_str(), backupFileName.c_str() );
if (rc != 0)
{
std::ostringstream oss;
@ -921,6 +953,7 @@ void RBMetaWriter::backupHWMChunk(
HWM startingHWM) // starting HWM for db segment file
{
std::string fileType("column");
if (!bColumnFile)
fileType = "dictionary";
@ -928,11 +961,12 @@ void RBMetaWriter::backupHWMChunk(
std::string segFile;
FileOp fileOp; // @bug 4960: to keep thread-safe, we use local FileOp
IDBDataFile* dbFile = fileOp.openFile( columnOID,
dbRoot,
partition,
segment,
segFile,
"rb" );
dbRoot,
partition,
segment,
segFile,
"rb" );
if ( !dbFile )
{
std::ostringstream oss;
@ -947,6 +981,7 @@ void RBMetaWriter::backupHWMChunk(
// Get the size of the file, so we know where to truncate back to.
long long fileSizeBytes;
int rc = fileOp.getFileSize( dbFile, fileSizeBytes);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -964,7 +999,8 @@ void RBMetaWriter::backupHWMChunk(
// Read Control header
char controlHdr[ IDBCompressInterface::HDR_BUF_LEN ];
rc = fileOp.readFile( dbFile, (unsigned char*)controlHdr,
IDBCompressInterface::HDR_BUF_LEN );
IDBCompressInterface::HDR_BUF_LEN );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -981,6 +1017,7 @@ void RBMetaWriter::backupHWMChunk(
IDBCompressInterface compressor;
int rc1 = compressor.verifyHdr( controlHdr );
if (rc1 != 0)
{
rc = ERR_METADATABKUP_COMP_VERIFY_HDRS;
@ -1003,6 +1040,7 @@ void RBMetaWriter::backupHWMChunk(
uint64_t ptrHdrSize = hdrSize - IDBCompressInterface::HDR_BUF_LEN;
char* pointerHdr = new char[ptrHdrSize];
rc = fileOp.readFile( dbFile, (unsigned char*)pointerHdr, ptrHdrSize );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -1021,6 +1059,7 @@ void RBMetaWriter::backupHWMChunk(
CompChunkPtrList chunkPtrs;
rc = compressor.getPtrList(pointerHdr, ptrHdrSize, chunkPtrs );
delete[] pointerHdr;
if (rc != 0)
{
std::ostringstream oss;
@ -1045,7 +1084,8 @@ void RBMetaWriter::backupHWMChunk(
chunkSize = chunkPtrs[chunkIndex].second;
// Read the HWM chunk
rc = fileOp.setFileOffset(dbFile,chunkPtrs[chunkIndex].first,SEEK_SET);
rc = fileOp.setFileOffset(dbFile, chunkPtrs[chunkIndex].first, SEEK_SET);
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -1062,6 +1102,7 @@ void RBMetaWriter::backupHWMChunk(
buffer = new unsigned char[ chunkPtrs[chunkIndex].second ];
rc = fileOp.readFile( dbFile, buffer, chunkPtrs[chunkIndex].second );
if (rc != NO_ERROR)
{
WErrorCodes ec;
@ -1104,7 +1145,8 @@ void RBMetaWriter::backupHWMChunk(
// Backup the HWM chunk
std::string errMsg;
rc = writeHWMChunk(bColumnFile, columnOID, dbRoot, partition, segment,
buffer, chunkSize, fileSizeBytes, startingHWM, errMsg);
buffer, chunkSize, fileSizeBytes, startingHWM, errMsg);
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -1118,7 +1160,7 @@ void RBMetaWriter::backupHWMChunk(
fileOp.closeFile( dbFile );
throw WeException( oss.str(), rc );
}
// Close the applicable database column segment file and free memory
delete []buffer;
fileOp.closeFile( dbFile );
@ -1155,6 +1197,7 @@ int RBMetaWriter::writeHWMChunk(
ossFile << "/" << columnOID << ".p" << partition << ".s" << segment;
std::string fileName;
int rc = getSubDirPath( dbRoot, fileName );
if (rc != NO_ERROR)
{
std::ostringstream oss;
@ -1163,6 +1206,7 @@ int RBMetaWriter::writeHWMChunk(
errMsg = oss.str();
return ERR_METADATABKUP_COMP_OPEN_BULK_BKUP;
}
fileName += ossFile.str();
std::string fileNameTmp = fileName;
@ -1172,6 +1216,7 @@ int RBMetaWriter::writeHWMChunk(
if (fLog)
{
std::string fileType("column");
if (!bColumnFile)
fileType = "dictionary";
@ -1186,10 +1231,11 @@ int RBMetaWriter::writeHWMChunk(
}
IDBDataFile* backupFile = IDBDataFile::open(
IDBPolicy::getType( fileNameTmp.c_str(), IDBPolicy::WRITEENG ),
fileNameTmp.c_str(),
"w+b",
0 );
IDBPolicy::getType( fileNameTmp.c_str(), IDBPolicy::WRITEENG ),
fileNameTmp.c_str(),
"w+b",
0 );
if (!backupFile)
{
int errRc = errno;
@ -1212,7 +1258,8 @@ int RBMetaWriter::writeHWMChunk(
uint64_t sizeHdr[2];
sizeHdr[0] = chunkSize;
sizeHdr[1] = fileSize;
size_t itemsWritten = backupFile->write(sizeHdr, sizeof(uint64_t)*2) / (sizeof(uint64_t)*2);
size_t itemsWritten = backupFile->write(sizeHdr, sizeof(uint64_t) * 2) / (sizeof(uint64_t) * 2);
if (itemsWritten != 1)
{
int errRc = errno;
@ -1232,6 +1279,7 @@ int RBMetaWriter::writeHWMChunk(
if (chunkSize > 0)
{
itemsWritten = backupFile->write(compressedOutBuf, chunkSize ) / chunkSize;
if (itemsWritten != 1)
{
int errRc = errno;
@ -1287,14 +1335,16 @@ int RBMetaWriter::writeHWMChunk(
// See that function description for more details.
//------------------------------------------------------------------------------
int RBMetaWriter::getSubDirPath( uint16_t dbRoot,
std::string& bulkRollbackSubPath ) const
std::string& bulkRollbackSubPath ) const
{
std::map<uint16_t,std::string>::const_iterator iter =
std::map<uint16_t, std::string>::const_iterator iter =
fMetaFileNames.find( dbRoot );
if (iter == fMetaFileNames.end())
{
return ERR_INVALID_PARAM;
}
bulkRollbackSubPath = iter->second;
bulkRollbackSubPath += DATA_DIR_SUFFIX;
@ -1307,7 +1357,7 @@ int RBMetaWriter::getSubDirPath( uint16_t dbRoot,
// previously existing chunk.
//------------------------------------------------------------------------------
void RBMetaWriter::printDctnryChunkList(
const RBChunkInfo& rbChk,
const RBChunkInfo& rbChk,
const char* assocAction)
{
if (fLog)
@ -1320,6 +1370,7 @@ void RBMetaWriter::printDctnryChunkList(
{
RBChunkSet::iterator iter = fRBChunkDctnrySet.begin();
int k = 1;
while (iter != fRBChunkDctnrySet.end())
{
oss << std::endl;
@ -1334,6 +1385,7 @@ void RBMetaWriter::printDctnryChunkList(
oss << std::endl;
oss << '\t' << "Empty list";
}
fLog->logMsg( oss.str(), MSGLVL_INFO2 );
}
}
@ -1366,7 +1418,7 @@ bool RBMetaWriter::verifyVersion4(const char* versionRec)
// Verify that specified record type is a Column1 record
//------------------------------------------------------------------------------
/* static */
bool RBMetaWriter::verifyColumn1Rec(const char* recType)
bool RBMetaWriter::verifyColumn1Rec(const char* recType)
{
if (strncmp(recType, COLUMN1_REC, COLUMN1_REC_LEN) == 0)
return true;


@ -51,7 +51,7 @@
namespace WriteEngine
{
class Log;
class Log;
//------------------------------------------------------------------------------
/** @brief Class used to store Dictionary store file information used in backing
@ -61,12 +61,12 @@ namespace WriteEngine
struct RBChunkInfo
{
OID fOid; // dctnry store OID containing relevant chunk
uint16_t fDbRoot; // dbroot, partition, segment of file
uint16_t fDbRoot; // dbroot, partition, segment of file
uint32_t fPartition; // containing relevant HWM chunk
uint16_t fSegment; //
HWM fHwm; // HWM block of interest
RBChunkInfo(OID oid, uint16_t dbRoot, uint32_t partition,
uint16_t segment, HWM hwm ) :
uint16_t segment, HWM hwm ) :
fOid(oid), fDbRoot(dbRoot), fPartition(partition),
fSegment(segment), fHwm(hwm) { }
};
@ -81,7 +81,7 @@ typedef std::set< RBChunkInfo, RBChunkInfoCompare > RBChunkSet;
//------------------------------------------------------------------------------
/** @brief Class to write HWM-related information to support bulk rollbacks.
*
*
* Should cpimport.bin terminate abnormally, leaving the db in an inconsistent
* state, then the information written by this class can be used to perform
* a bulk rollback, to restore the db to its previous state, prior to the
@ -119,7 +119,7 @@ typedef std::set< RBChunkInfo, RBChunkInfoCompare > RBChunkSet;
* writeDictionaryStoreMetaData()
* writeDictionaryStoreMetaNoDataMarker()
* closeMetaFile()
* 3. Backup necessary HWM chunks to backup chunk files:
* 3. Backup necessary HWM chunks to backup chunk files:
* a. backupColumnHWMChunk()
* b. backupDctnryHWMChunk()
* 4. Delete meta data file and HWM chunk files at end of successful job:
@ -149,7 +149,10 @@ public:
/** @brief RBMetaWriter destructor
*/
EXPORT ~RBMetaWriter ( ) { closeMetaFile ( ); }
EXPORT ~RBMetaWriter ( )
{
closeMetaFile ( );
}
/** @brief Initialize this RBMetaWriter object
* Warning: This function may throw a WeException.
@ -158,7 +161,7 @@ public:
* @param tableName Name of the table associated with tableOID.
*/
EXPORT void init ( OID tableOID,
const std::string& tableName );
const std::string& tableName );
/** @brief Make a backup copy of the specified HWM dictionary store chunk.
* This operation only applies to compressed columns. Backup may not be
@ -249,7 +252,7 @@ private:
// This function must be thread-safe since it is called directly by
// backupDctnryHWMChunk(). Employed by non-hdfs.
void backupHWMChunk (
void backupHWMChunk (
bool bColumnFile,
OID columnOID,
uint16_t dbRoot,
@ -259,7 +262,7 @@ private:
// This function must be thread-safe since it is called directly by
// backupDctnryHWMFile(). Employed by hdfs.
void backupHWMFile (
void backupHWMFile (
bool bColumnFile,
OID columnOID,
uint16_t dbRoot,
@ -273,7 +276,7 @@ private:
void createSubDir( const std::string& metaFileName );
void deleteSubDir( const std::string& metaFileName );
int getSubDirPath(const uint16_t dbRoot,
std::string& subDirPath ) const;
std::string& subDirPath ) const;
// Open a meta data file to save HWM bulk rollback info for tableOID
// Warning: This function may throw a WeException.


@ -71,6 +71,7 @@ void SimpleSysLog::logMsg( const logging::Message::Args& msgArgs,
m.format(msgArgs);
boost::mutex::scoped_lock lk(fWriteLockMutex);
switch (logType)
{
case logging::LOG_TYPE_DEBUG:
@ -78,22 +79,26 @@ void SimpleSysLog::logMsg( const logging::Message::Args& msgArgs,
ml.logDebugMessage(m);
break;
}
case logging::LOG_TYPE_INFO:
default:
{
ml.logInfoMessage(m);
break;
}
case logging::LOG_TYPE_WARNING:
{
ml.logWarningMessage(m);
break;
}
case logging::LOG_TYPE_ERROR:
{
ml.logErrorMessage(m);
break;
}
case logging::LOG_TYPE_CRITICAL:
{
ml.logCriticalMessage(m);


@ -48,20 +48,20 @@ namespace WriteEngine
class SimpleSysLog
{
public:
/**
* @brief Singleton accessor.
*/
/**
* @brief Singleton accessor.
*/
EXPORT static SimpleSysLog* instance();
/**
* @brief Modify the LoggingID to be used. Mainly used to control the
* subsystem ID.
*/
/**
* @brief Modify the LoggingID to be used. Mainly used to control the
* subsystem ID.
*/
EXPORT void setLoggingID( const logging::LoggingID& loggingID );
/**
* @brief Function that logs a syslog msg.
*/
/**
* @brief Function that logs a syslog msg.
*/
EXPORT void logMsg( const logging::Message::Args& msgArgs,
logging::LOG_TYPE logType,
logging::Message::MessageID msgId );
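A minimal usage sketch for SimpleSysLog as declared above, assuming the usual logging::LoggingID(subsystemId) constructor and Message::Args::add() overload from the shared logging library; the subsystem id is illustrative, and the message id reuses logging::M0087 from this commit.

// Illustrative sketch only.
#include <string>
#include "we_simplesyslog.h"   // assumed header name for WriteEngine::SimpleSysLog

void demoSyslog()
{
    using namespace WriteEngine;

    SimpleSysLog* sysLog = SimpleSysLog::instance();
    sysLog->setLoggingID( logging::LoggingID( 19 ) ); // illustrative subsystem id

    logging::Message::Args args;
    args.add( std::string( "demo message from the write engine" ) );

    sysLog->logMsg( args, logging::LOG_TYPE_INFO, logging::M0087 );
}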


@ -28,202 +28,209 @@ using namespace std;
namespace WriteEngine
{
#ifdef PROFILE
/* static */ bool Stats::fProfiling = false;
/* static */ boost::mutex Stats::fRegisterReaderMutex;
/* static */ boost::mutex Stats::fRegisterParseMutex;
/* static */ std::vector<pthread_t> Stats::fReadProfThreads;
/* static */ std::vector<pthread_t> Stats::fParseProfThreads;
/* static */ std::vector<logging::StopWatch> Stats::fReadStopWatch;
/* static */ std::vector<logging::StopWatch> Stats::fParseStopWatch;
/* static */ bool Stats::fProfiling = false;
/* static */ boost::mutex Stats::fRegisterReaderMutex;
/* static */ boost::mutex Stats::fRegisterParseMutex;
/* static */ std::vector<pthread_t> Stats::fReadProfThreads;
/* static */ std::vector<pthread_t> Stats::fParseProfThreads;
/* static */ std::vector<logging::StopWatch> Stats::fReadStopWatch;
/* static */ std::vector<logging::StopWatch> Stats::fParseStopWatch;
#endif
struct IoStats Stats::m_ioStats = { 0, 0 };
bool Stats::m_bUseStats = false;
/***********************************************************
* DESCRIPTION:
* Increase the counter for block read
* PARAMETERS:
* blockNum - the number of blocks
* RETURN:
* none
***********************************************************/
void Stats::incIoBlockRead( const int blockNum )
{
if( !m_bUseStats )
return;
m_ioStats.blockRead += blockNum;
}
struct IoStats Stats::m_ioStats = { 0, 0 };
bool Stats::m_bUseStats = false;
/***********************************************************
* DESCRIPTION:
* Increase the counter for block read
* PARAMETERS:
* blockNum - the number of blocks
* RETURN:
* none
***********************************************************/
void Stats::incIoBlockRead( const int blockNum )
{
if ( !m_bUseStats )
return;
/***********************************************************
* DESCRIPTION:
* Increase the counter for block write
* PARAMETERS:
* blockNum - the number of blocks
* RETURN:
* none
***********************************************************/
void Stats::incIoBlockWrite( const int blockNum )
{
if( !m_bUseStats )
return;
m_ioStats.blockWrite += blockNum;
}
m_ioStats.blockRead += blockNum;
}
/***********************************************************
* DESCRIPTION:
* Increase the counter for block write
* PARAMETERS:
* blockNum - the number of blocks
* RETURN:
* none
***********************************************************/
void Stats::incIoBlockWrite( const int blockNum )
{
if ( !m_bUseStats )
return;
m_ioStats.blockWrite += blockNum;
}
#ifdef PROFILE
//-------------------------------------------------------------------------------
// Functions that follow are used for profiling using the StopWatch class
//-------------------------------------------------------------------------------
/***********************************************************
* DESCRIPTION:
* Enable/Initialize the profiling functions
* PARAMETERS:
* nReadThreads - number of read threads to be profiled
* nParseThreads - number of parse threads to be profiled
* RETURN:
* none
***********************************************************/
void Stats::enableProfiling(int nReadThreads, int nParseThreads)
{
fProfiling = true;
/***********************************************************
* DESCRIPTION:
* Enable/Initialize the profiling functions
* PARAMETERS:
* nReadThreads - number of read threads to be profiled
* nParseThreads - number of parse threads to be profiled
* RETURN:
* none
***********************************************************/
void Stats::enableProfiling(int nReadThreads, int nParseThreads)
{
fProfiling = true;
// @bug 2625: pre-reserve space for our vectors; else we could have a race
// condition whereby one parsing thread is adding itself to the vectors
// and thus "growing" the vector (in registerParseProfThread), at the
// same time that another parsing thread is reading the vector in parse-
// Event(). By pre-reserving the space, the vectors won't be growing,
// thus eliminating the problem with this race condition.
fReadProfThreads.reserve ( nReadThreads );
fReadStopWatch.reserve ( nReadThreads );
fParseProfThreads.reserve( nParseThreads );
fParseStopWatch.reserve ( nParseThreads );
}
// @bug 2625: pre-reserve space for our vectors; else we could have a race
// condition whereby one parsing thread is adding itself to the vectors
// and thus "growing" the vector (in registerParseProfThread), at the
// same time that another parsing thread is reading the vector in parse-
// Event(). By pre-reserving the space, the vectors won't be growing,
// thus eliminating the problem with this race condition.
fReadProfThreads.reserve ( nReadThreads );
fReadStopWatch.reserve ( nReadThreads );
fParseProfThreads.reserve( nParseThreads );
fParseStopWatch.reserve ( nParseThreads );
}
/***********************************************************
* DESCRIPTION:
* Register the current thread as a Read thread to be profiled
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::registerReadProfThread( )
{
boost::mutex::scoped_lock lk(fRegisterReaderMutex);
/***********************************************************
* DESCRIPTION:
* Register the current thread as a Read thread to be profiled
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::registerReadProfThread( )
{
boost::mutex::scoped_lock lk(fRegisterReaderMutex);
fReadProfThreads.push_back( pthread_self() );
logging::StopWatch readStopWatch;
fReadStopWatch.push_back ( readStopWatch );
}
fReadProfThreads.push_back( pthread_self() );
logging::StopWatch readStopWatch;
fReadStopWatch.push_back ( readStopWatch );
}
/***********************************************************
* DESCRIPTION:
* Register the current thread as a Parse thread to be profiled
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::registerParseProfThread( )
{
boost::mutex::scoped_lock lk(fRegisterParseMutex);
/***********************************************************
* DESCRIPTION:
* Register the current thread as a Parse thread to be profiled
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::registerParseProfThread( )
{
boost::mutex::scoped_lock lk(fRegisterParseMutex);
fParseProfThreads.push_back( pthread_self() );
logging::StopWatch parseStopWatch;
fParseStopWatch.push_back ( parseStopWatch );
}
fParseProfThreads.push_back( pthread_self() );
logging::StopWatch parseStopWatch;
fParseStopWatch.push_back ( parseStopWatch );
}
/***********************************************************
* DESCRIPTION:
* Track the specified Read event in the current Read thread.
* PARAMETERS:
* eventString - string that identifies the event.
* start - boolean indicating whether this is the start or the
* end of the event. TRUE=>start FALSE=>end
* RETURN:
* none
***********************************************************/
void Stats::readEvent ( const std::string& eventString, bool start )
{
if (fProfiling)
{
pthread_t thread = pthread_self();
for (unsigned i=0; i<fReadProfThreads.size(); i++)
{
/***********************************************************
* DESCRIPTION:
* Track the specified Read event in the current Read thread.
* PARAMETERS:
* eventString - string that identifies the event.
* start - boolean indicating whether this is the start or the
* end of the event. TRUE=>start FALSE=>end
* RETURN:
* none
***********************************************************/
void Stats::readEvent ( const std::string& eventString, bool start )
{
if (fProfiling)
{
pthread_t thread = pthread_self();
for (unsigned i = 0; i < fReadProfThreads.size(); i++)
{
if (fReadProfThreads[i] == thread)
{
if (start)
fReadStopWatch[i].start( eventString );
else
fReadStopWatch[i].stop ( eventString );
break;
}
}
}
}
if (start)
fReadStopWatch[i].start( eventString );
else
fReadStopWatch[i].stop ( eventString );
/***********************************************************
* DESCRIPTION:
* Track the specified Parse event in the current Parse thread.
* PARAMETERS:
* eventString - string that identifies the event.
* start - boolean indicating whether this is the start or the
* end of the event. TRUE=>start FALSE=>end
* RETURN:
* none
***********************************************************/
void Stats::parseEvent ( const std::string& eventString, bool start )
{
if (fProfiling)
{
pthread_t thread = pthread_self();
for (unsigned i=0; i<fParseProfThreads.size(); i++)
{
break;
}
}
}
}
/***********************************************************
* DESCRIPTION:
* Track the specified Parse event in the current Parse thread.
* PARAMETERS:
* eventString - string that identifies the event.
* start - boolean indicating whether this is the start or the
* end of the event. TRUE=>start FALSE=>end
* RETURN:
* none
***********************************************************/
void Stats::parseEvent ( const std::string& eventString, bool start )
{
if (fProfiling)
{
pthread_t thread = pthread_self();
for (unsigned i = 0; i < fParseProfThreads.size(); i++)
{
if (fParseProfThreads[i] == thread)
{
if (start)
fParseStopWatch[i].start( eventString );
else
fParseStopWatch[i].stop ( eventString );
break;
}
}
}
}
if (start)
fParseStopWatch[i].start( eventString );
else
fParseStopWatch[i].stop ( eventString );
/***********************************************************
* DESCRIPTION:
* Print profiling results.
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::printProfilingResults ( )
{
if (fProfiling)
{
std::cout << endl;
for (unsigned j=0; j<fReadStopWatch.size(); j++)
{
break;
}
}
}
}
/***********************************************************
* DESCRIPTION:
* Print profiling results.
* PARAMETERS:
* none
* RETURN:
* none
***********************************************************/
void Stats::printProfilingResults ( )
{
if (fProfiling)
{
std::cout << endl;
for (unsigned j = 0; j < fReadStopWatch.size(); j++)
{
std::cout << "Execution Stats for Read Thread " << j << " (" <<
fReadProfThreads[j] << ")" << std::endl <<
"-------------------------------" << std::endl;
fReadProfThreads[j] << ")" << std::endl <<
"-------------------------------" << std::endl;
fReadStopWatch[j].finish();
std::cout << std::endl;
}
}
for (unsigned j=0; j<fParseStopWatch.size(); j++)
{
std::cout << "Execution Stats for Parse Thread "<< j << " (" <<
fParseProfThreads[j] << ")" << std::endl <<
"--------------------------------" << std::endl;
for (unsigned j = 0; j < fParseStopWatch.size(); j++)
{
std::cout << "Execution Stats for Parse Thread " << j << " (" <<
fParseProfThreads[j] << ")" << std::endl <<
"--------------------------------" << std::endl;
fParseStopWatch[j].finish();
std::cout << std::endl;
}
}
}
}
}
}
#endif
} //end of namespace
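A sketch of how the profiling hooks above are used when the code is built with PROFILE defined. The event label is one of the WE_STATS_* strings from the Stats header; the thread wiring is omitted and the include name is assumed.

// Illustrative sketch only.
#include "we_stats.h"   // assumed header name for WriteEngine::Stats

#ifdef PROFILE
void demoReadThread()
{
    WriteEngine::Stats::registerReadProfThread();                 // once per read thread
    WriteEngine::Stats::startReadEvent( WE_STATS_READ_INTO_BUF ); // bracket the work...
    // ... read a buffer ...
    WriteEngine::Stats::stopReadEvent( WE_STATS_READ_INTO_BUF );  // ...with start/stop
}

void demoMain()
{
    WriteEngine::Stats::enableProfiling( 1, 1 );  // 1 read thread, 1 parse thread
    // ... launch demoReadThread on a worker thread and wait for it ...
    WriteEngine::Stats::printProfilingResults();  // dump per-thread timings
}
#endif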


@ -55,76 +55,96 @@ public:
/**
* @brief I/O
*/
static long getIoBlockRead() { return m_ioStats.blockRead; }
static long getIoBlockWrite() { return m_ioStats.blockWrite; }
static long getIoBlockRead()
{
return m_ioStats.blockRead;
}
static long getIoBlockWrite()
{
return m_ioStats.blockWrite;
}
static void incIoBlockRead( const int blockNum = 1 );
static void incIoBlockWrite( const int blockNum = 1 );
static bool getUseStats() { return m_bUseStats; }
static void setUseStats( const bool flag ) { m_bUseStats = flag; }
static bool getUseStats()
{
return m_bUseStats;
}
static void setUseStats( const bool flag )
{
m_bUseStats = flag;
}
static IoStats m_ioStats; // IO
#ifdef PROFILE
// Predefined event labels
#define WE_STATS_ALLOC_DCT_EXTENT "AllocDctExtent"
#define WE_STATS_COMPACT_VARBINARY "CompactingVarBinary"
#define WE_STATS_COMPLETING_PARSE "CompletingParse"
#define WE_STATS_COMPLETING_READ "CompletingRead"
#define WE_STATS_COMPRESS_COL_INIT_ABBREV_EXT "CmpColInitAbbrevExtent"
#define WE_STATS_COMPRESS_COL_INIT_BUF "CmpColInitBuf"
#define WE_STATS_COMPRESS_COL_COMPRESS "CmpColCompress"
#define WE_STATS_COMPRESS_COL_FINISH_EXTENT "CmpColFinishExtent"
#define WE_STATS_COMPRESS_DCT_INIT_BUF "CmpDctInitBuf"
#define WE_STATS_COMPRESS_DCT_COMPRESS "CmpDctCompress"
#define WE_STATS_COMPRESS_DCT_SEEKO_CHUNK "CmpDctSeekOutputChunk"
#define WE_STATS_COMPRESS_DCT_WRITE_CHUNK "CmpDctWriteChunk"
#define WE_STATS_COMPRESS_DCT_SEEKO_HDR "CmpDctSeekOutputHdr"
#define WE_STATS_COMPRESS_DCT_WRITE_HDR "CmpDctWriteHdr"
#define WE_STATS_COMPRESS_DCT_BACKUP_CHUNK "CmpDctBackupChunk"
#define WE_STATS_CREATE_COL_EXTENT "CreateColExtent"
#define WE_STATS_CREATE_DCT_EXTENT "CreateDctExtent"
#define WE_STATS_EXPAND_COL_EXTENT "ExpandColExtent"
#define WE_STATS_EXPAND_DCT_EXTENT "ExpandDctExtent"
#define WE_STATS_FLUSH_PRIMPROC_BLOCKS "FlushPrimProcBlocks"
#define WE_STATS_INIT_COL_EXTENT "InitColExtent"
#define WE_STATS_INIT_DCT_EXTENT "InitDctExtent"
#define WE_STATS_OPEN_DCT_FILE "OpenDctFile"
#define WE_STATS_PARSE_COL "ParseCol"
#define WE_STATS_PARSE_DCT "ParseDct"
#define WE_STATS_PARSE_DCT_SEEK_EXTENT_BLK "ParseDctSeekExtentBlk"
#define WE_STATS_READ_INTO_BUF "ReadIntoBuf"
#define WE_STATS_RESIZE_OUT_BUF "ResizeOutBuf"
#define WE_STATS_WAIT_FOR_INTERMEDIATE_FLUSH "WaitForIntermediateFlush"
#define WE_STATS_WAIT_FOR_READ_BUF "WaitForReadBuf"
#define WE_STATS_WAIT_TO_COMPLETE_PARSE "WaitCompleteParse"
#define WE_STATS_WAIT_TO_COMPLETE_READ "WaitCompleteRead"
#define WE_STATS_WAIT_TO_CREATE_COL_EXTENT "WaitCreateColExtent"
#define WE_STATS_WAIT_TO_CREATE_DCT_EXTENT "WaitCreateDctExtent"
#define WE_STATS_WAIT_TO_EXPAND_COL_EXTENT "WaitExpandColExtent"
#define WE_STATS_WAIT_TO_EXPAND_DCT_EXTENT "WaitExpandDctExtent"
#define WE_STATS_WAIT_TO_PARSE_DCT "WaitParseDct"
#define WE_STATS_WAIT_TO_RELEASE_OUT_BUF "WaitReleaseOutBuf"
#define WE_STATS_WAIT_TO_RESERVE_OUT_BUF "WaitReserveOutBuf"
#define WE_STATS_WAIT_TO_RESIZE_OUT_BUF "WaitResizeOutBuf"
#define WE_STATS_WAIT_TO_SELECT_COL "WaitSelectCol"
#define WE_STATS_WAIT_TO_SELECT_TBL "WaitSelectTbl"
#define WE_STATS_WRITE_COL "WriteCol"
#define WE_STATS_WRITE_DCT "WriteDct"
#define WE_STATS_ALLOC_DCT_EXTENT "AllocDctExtent"
#define WE_STATS_COMPACT_VARBINARY "CompactingVarBinary"
#define WE_STATS_COMPLETING_PARSE "CompletingParse"
#define WE_STATS_COMPLETING_READ "CompletingRead"
#define WE_STATS_COMPRESS_COL_INIT_ABBREV_EXT "CmpColInitAbbrevExtent"
#define WE_STATS_COMPRESS_COL_INIT_BUF "CmpColInitBuf"
#define WE_STATS_COMPRESS_COL_COMPRESS "CmpColCompress"
#define WE_STATS_COMPRESS_COL_FINISH_EXTENT "CmpColFinishExtent"
#define WE_STATS_COMPRESS_DCT_INIT_BUF "CmpDctInitBuf"
#define WE_STATS_COMPRESS_DCT_COMPRESS "CmpDctCompress"
#define WE_STATS_COMPRESS_DCT_SEEKO_CHUNK "CmpDctSeekOutputChunk"
#define WE_STATS_COMPRESS_DCT_WRITE_CHUNK "CmpDctWriteChunk"
#define WE_STATS_COMPRESS_DCT_SEEKO_HDR "CmpDctSeekOutputHdr"
#define WE_STATS_COMPRESS_DCT_WRITE_HDR "CmpDctWriteHdr"
#define WE_STATS_COMPRESS_DCT_BACKUP_CHUNK "CmpDctBackupChunk"
#define WE_STATS_CREATE_COL_EXTENT "CreateColExtent"
#define WE_STATS_CREATE_DCT_EXTENT "CreateDctExtent"
#define WE_STATS_EXPAND_COL_EXTENT "ExpandColExtent"
#define WE_STATS_EXPAND_DCT_EXTENT "ExpandDctExtent"
#define WE_STATS_FLUSH_PRIMPROC_BLOCKS "FlushPrimProcBlocks"
#define WE_STATS_INIT_COL_EXTENT "InitColExtent"
#define WE_STATS_INIT_DCT_EXTENT "InitDctExtent"
#define WE_STATS_OPEN_DCT_FILE "OpenDctFile"
#define WE_STATS_PARSE_COL "ParseCol"
#define WE_STATS_PARSE_DCT "ParseDct"
#define WE_STATS_PARSE_DCT_SEEK_EXTENT_BLK "ParseDctSeekExtentBlk"
#define WE_STATS_READ_INTO_BUF "ReadIntoBuf"
#define WE_STATS_RESIZE_OUT_BUF "ResizeOutBuf"
#define WE_STATS_WAIT_FOR_INTERMEDIATE_FLUSH "WaitForIntermediateFlush"
#define WE_STATS_WAIT_FOR_READ_BUF "WaitForReadBuf"
#define WE_STATS_WAIT_TO_COMPLETE_PARSE "WaitCompleteParse"
#define WE_STATS_WAIT_TO_COMPLETE_READ "WaitCompleteRead"
#define WE_STATS_WAIT_TO_CREATE_COL_EXTENT "WaitCreateColExtent"
#define WE_STATS_WAIT_TO_CREATE_DCT_EXTENT "WaitCreateDctExtent"
#define WE_STATS_WAIT_TO_EXPAND_COL_EXTENT "WaitExpandColExtent"
#define WE_STATS_WAIT_TO_EXPAND_DCT_EXTENT "WaitExpandDctExtent"
#define WE_STATS_WAIT_TO_PARSE_DCT "WaitParseDct"
#define WE_STATS_WAIT_TO_RELEASE_OUT_BUF "WaitReleaseOutBuf"
#define WE_STATS_WAIT_TO_RESERVE_OUT_BUF "WaitReserveOutBuf"
#define WE_STATS_WAIT_TO_RESIZE_OUT_BUF "WaitResizeOutBuf"
#define WE_STATS_WAIT_TO_SELECT_COL "WaitSelectCol"
#define WE_STATS_WAIT_TO_SELECT_TBL "WaitSelectTbl"
#define WE_STATS_WRITE_COL "WriteCol"
#define WE_STATS_WRITE_DCT "WriteDct"
// Functions used to support performance profiling
static void enableProfiling(int nReadThreads, int nParseThreads);
static void registerReadProfThread ( );
static void registerParseProfThread( );
static void startReadEvent ( const std::string& eventString )
{ readEvent ( eventString, true ); }
{
readEvent ( eventString, true );
}
static void stopReadEvent ( const std::string& eventString )
{ readEvent ( eventString, false ); }
{
readEvent ( eventString, false );
}
static void startParseEvent( const std::string& eventString )
{ parseEvent( eventString, true ); }
{
parseEvent( eventString, true );
}
static void stopParseEvent ( const std::string& eventString )
{ parseEvent( eventString, false ); }
{
parseEvent( eventString, false );
}
static void printProfilingResults( );
#endif
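
For orientation, here is a minimal, hypothetical usage sketch of the profiling interface declared above. It assumes a build with PROFILE defined and that the enclosing class is named Stats (the class name is not visible in this hunk); the thread counts and the event label are illustrative only.

#ifdef PROFILE
// Illustrative sketch, not part of the commit: time one buffered read.
void profileOneReadPass()
{
    WriteEngine::Stats::enableProfiling(1, 2);      // 1 read thread, 2 parse threads (example values)
    WriteEngine::Stats::registerReadProfThread();   // register the calling thread as a read thread

    WriteEngine::Stats::startReadEvent(WE_STATS_READ_INTO_BUF);
    // ... perform the buffered read being measured ...
    WriteEngine::Stats::stopReadEvent(WE_STATS_READ_INTO_BUF);

    WriteEngine::Stats::printProfilingResults();    // dump per-thread timings
}
#endif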

View File

@ -47,428 +47,490 @@
/** Namespace WriteEngine */
namespace WriteEngine
{
typedef idbdatafile::IDBDataFile IDBDataFile;
typedef idbdatafile::IDBDataFile IDBDataFile;
/************************************************************************
* Type definitions
************************************************************************/
typedef uint32_t OID; /** @brief Object ID */
typedef uint32_t FID; /** @brief File ID */
typedef uint64_t RID; /** @brief Row ID */
typedef uint32_t TxnID; /** @brief Transaction ID (New)*/
typedef uint32_t HWM; /** @brief high water mark */
/************************************************************************
* Type definitions
************************************************************************/
typedef uint32_t OID; /** @brief Object ID */
typedef uint32_t FID; /** @brief File ID */
typedef uint64_t RID; /** @brief Row ID */
typedef uint32_t TxnID; /** @brief Transaction ID (New)*/
typedef uint32_t HWM; /** @brief high water mark */
/************************************************************************
* Type enumerations
************************************************************************/
enum DebugLevel { /** @brief Debug level type */
DEBUG_0 = 0, /** @brief No debug info */
DEBUG_1 = 1, /** @brief Summary level debug */
DEBUG_2 = 2, /** @brief Moderate debug */
DEBUG_3 = 3, /** @brief Detail debug */
};
/************************************************************************
* Type enumerations
************************************************************************/
enum DebugLevel /** @brief Debug level type */
{
DEBUG_0 = 0, /** @brief No debug info */
DEBUG_1 = 1, /** @brief Summary level debug */
DEBUG_2 = 2, /** @brief Moderate debug */
DEBUG_3 = 3, /** @brief Detail debug */
};
// INFO2 only goes to log file unless '-i' cmd line arg is specified,
// in which case the msg will also get logged to the console.
// All other messages always get logged to the log file and the console.
enum MsgLevel { /** @brief Message level */
MSGLVL_INFO1 = 0, /** @brief Basic Information level*/
MSGLVL_INFO2 = 1, /** @brief More Information level */
MSGLVL_WARNING = 2, /** @brief Warning level */
MSGLVL_ERROR = 3, /** @brief Error level */
MSGLVL_CRITICAL = 4, /** @brief Critical level */
};
// INFO2 only goes to log file unless '-i' cmd line arg is specified,
// in which case the msg will also get logged to the console.
// All other messages always get logged to the log file and the console.
enum MsgLevel /** @brief Message level */
{
MSGLVL_INFO1 = 0, /** @brief Basic Information level*/
MSGLVL_INFO2 = 1, /** @brief More Information level */
MSGLVL_WARNING = 2, /** @brief Warning level */
MSGLVL_ERROR = 3, /** @brief Error level */
MSGLVL_CRITICAL = 4, /** @brief Critical level */
};
enum OpType { /** @brief Operation type */
NOOP = 0, /** @brief No oper */
INSERT = 1, /** @brief Insert */
UPDATE = 2, /** @brief Update */
DELETE = 4, /** @brief Delete */
QUERY = 8, /** @brief Query */
};
enum OpType /** @brief Operation type */
{
NOOP = 0, /** @brief No oper */
INSERT = 1, /** @brief Insert */
UPDATE = 2, /** @brief Update */
DELETE = 4, /** @brief Delete */
QUERY = 8, /** @brief Query */
};
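
Since the OpType enumerators are powers of two, they can be combined and tested as a bit mask; the short sketch below is added here purely for illustration and is not part of the source.

// Sketch only: combining OpType flags into a mask and testing membership.
#include <cassert>
void opTypeMaskExample()
{
    using namespace WriteEngine;
    int ops = INSERT | UPDATE;            // operations allowed in this pass
    assert((ops & UPDATE) == UPDATE);     // UPDATE is in the mask
    assert((ops & DELETE) == 0);          // DELETE is not
}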
enum ColType { /** @brief Column type enumeration*/
enum ColType /** @brief Column type enumeration*/
{
// WR_BIT = 1, /** @brief Bit */
WR_BYTE = 2, /** @brief Byte */
WR_SHORT = 3, /** @brief Short */
WR_INT = 4, /** @brief Int */
WR_BYTE = 2, /** @brief Byte */
WR_SHORT = 3, /** @brief Short */
WR_INT = 4, /** @brief Int */
// WR_LONG = 5, /** @brief Long */
WR_LONGLONG = 6, /** @brief Long long*/
WR_FLOAT = 7, /** @brief Float */
WR_DOUBLE = 8, /** @brief Double */
WR_CHAR = 9, /** @brief Char */
WR_TOKEN = 10, /** @brief Token */
WR_BLOB = 11, /** @brief BLOB */
WR_VARBINARY = 12, /** @brief VARBINARY */
WR_UBYTE = 13, /** @brief Unsigned Byte */
WR_USHORT = 14, /** @brief Unsigned Short */
WR_UINT = 15, /** @brief Unsigned Int */
WR_ULONGLONG = 16, /** @brief Unsigned Long long*/
WR_TEXT = 17 /** @brief TEXT */
};
WR_LONGLONG = 6, /** @brief Long long*/
WR_FLOAT = 7, /** @brief Float */
WR_DOUBLE = 8, /** @brief Double */
WR_CHAR = 9, /** @brief Char */
WR_TOKEN = 10, /** @brief Token */
WR_BLOB = 11, /** @brief BLOB */
WR_VARBINARY = 12, /** @brief VARBINARY */
WR_UBYTE = 13, /** @brief Unsigned Byte */
WR_USHORT = 14, /** @brief Unsigned Short */
WR_UINT = 15, /** @brief Unsigned Int */
WR_ULONGLONG = 16, /** @brief Unsigned Long long*/
WR_TEXT = 17 /** @brief TEXT */
};
// Describes relation of field to column for a bulk load
enum BulkFldColRel { BULK_FLDCOL_COLUMN_FIELD, // map input field to db col
BULK_FLDCOL_COLUMN_DEFAULT,// import def val to db col
BULK_FLDCOL_IGNORE_FIELD };// ignore fld in import file
// Describes relation of field to column for a bulk load
enum BulkFldColRel { BULK_FLDCOL_COLUMN_FIELD, // map input field to db col
BULK_FLDCOL_COLUMN_DEFAULT,// import def val to db col
BULK_FLDCOL_IGNORE_FIELD
};// ignore fld in import file
// Bulk Load Mode (ex: local vs remote, single src vs multiple src files)
enum BulkModeType { BULK_MODE_REMOTE_SINGLE_SRC = 1,
BULK_MODE_REMOTE_MULTIPLE_SRC = 2,
BULK_MODE_LOCAL = 3 };
// Bulk Load Mode (ex: local vs remote, single src vs multiple src files)
enum BulkModeType { BULK_MODE_REMOTE_SINGLE_SRC = 1,
BULK_MODE_REMOTE_MULTIPLE_SRC = 2,
BULK_MODE_LOCAL = 3
};
// Import Mode 0-text Import (default)
// 1-Binary Import with NULL values
// 2-Binary Import with saturated NULL values
enum ImportDataMode { IMPORT_DATA_TEXT = 0,
IMPORT_DATA_BIN_ACCEPT_NULL = 1,
IMPORT_DATA_BIN_SAT_NULL = 2 };
// Import Mode 0-text Import (default)
// 1-Binary Import with NULL values
// 2-Binary Import with saturated NULL values
enum ImportDataMode { IMPORT_DATA_TEXT = 0,
IMPORT_DATA_BIN_ACCEPT_NULL = 1,
IMPORT_DATA_BIN_SAT_NULL = 2
};
/**
* the set of Calpont column data type names; MUST match ColDataType in
* calpontsystemcatalog.h.
*/
const char ColDataTypeStr[execplan::CalpontSystemCatalog::NUM_OF_COL_DATA_TYPE][20] = {
"bit",
"tinyint",
"char",
"smallint",
"decimal",
"medint",
"integer",
"float",
"date",
"bigint",
"double",
"datetime",
"varchar",
"varbinary",
"clob",
"blob",
"unsigned-tinyint",
"unsigned-smallint",
"unsigned-decimal",
"unsigned-med int",
"unsigned-int",
"unsigned-float",
"unsigned-bigint",
"unsigned-double",
"text"
};
/**
* the set of Calpont column data type names; MUST match ColDataType in
* calpontsystemcatalog.h.
*/
const char ColDataTypeStr[execplan::CalpontSystemCatalog::NUM_OF_COL_DATA_TYPE][20] =
{
"bit",
"tinyint",
"char",
"smallint",
"decimal",
"medint",
"integer",
"float",
"date",
"bigint",
"double",
"datetime",
"varchar",
"varbinary",
"clob",
"blob",
"unsigned-tinyint",
"unsigned-smallint",
"unsigned-decimal",
"unsigned-med int",
"unsigned-int",
"unsigned-float",
"unsigned-bigint",
"unsigned-double",
"text"
};
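
The comment above requires ColDataTypeStr to stay in lock-step with the catalog's ColDataType enum. A small hedged sketch of the intended lookup follows, assuming the enumerators are the usual zero-based indexes into this table.

// Sketch only: map a catalog data type to its display name via the table above.
#include <iostream>
void printColTypeName(execplan::CalpontSystemCatalog::ColDataType t)
{
    std::cout << WriteEngine::ColDataTypeStr[t] << std::endl;   // e.g. "integer"
}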
enum FuncType { FUNC_WRITE_ENGINE, FUNC_INDEX, FUNC_DICTIONARY };
enum FuncType { FUNC_WRITE_ENGINE, FUNC_INDEX, FUNC_DICTIONARY };
enum CacheListType { FREE_LIST, LRU_LIST, WRITE_LIST }; /** @brief List type */
enum CacheListType { FREE_LIST, LRU_LIST, WRITE_LIST }; /** @brief List type */
/************************************************************************
* struct data block structure
************************************************************************/
struct DataBlock /** @brief Data block structure */
/************************************************************************
* struct data block structure
************************************************************************/
struct DataBlock /** @brief Data block structure */
{
long no; /** @brief block number */
uint64_t lbid; /** @brief lbid */
bool dirty; /** @brief block dirty flag */
int state; /** @brief initialized 0, read 1 , modified 2 */
unsigned char data[BYTE_PER_BLOCK];/** @brief data buffer */
DataBlock()
{
long no; /** @brief block number */
uint64_t lbid; /** @brief lbid */
bool dirty; /** @brief block dirty flag */
int state; /** @brief initialized 0, read 1 , modified 2 */
unsigned char data[BYTE_PER_BLOCK];/** @brief data buffer */
DataBlock() { dirty = false; /** @brief constructor */
memset( data, 0, BYTE_PER_BLOCK ); }
};
dirty = false; /** @brief constructor */
memset( data, 0, BYTE_PER_BLOCK );
}
};
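
A brief hypothetical sketch of how a DataBlock is meant to be handled: the constructor zero-fills the buffer, and callers flip the bookkeeping fields after touching the data.

// Sketch only: mark a freshly modified block for write-back.
void markBlockModified(WriteEngine::DataBlock& blk)
{
    blk.data[0] = 0xFF;   // example mutation of the block buffer
    blk.dirty = true;     // block now differs from what is on disk
    blk.state = 2;        // "modified", per the field comment above
}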
struct DataSubBlock /** @brief Data subblock structure*/
struct DataSubBlock /** @brief Data subblock structure*/
{
long no; /** @brief sub block number */
bool dirty; /** @brief block dirty flag */
unsigned char data[BYTE_PER_SUBBLOCK]; /** @brief data buffer */
DataSubBlock()
{
long no; /** @brief sub block number */
bool dirty; /** @brief block dirty flag */
unsigned char data[BYTE_PER_SUBBLOCK]; /** @brief data buffer */
DataSubBlock() { dirty = false; memset( data, 0, BYTE_PER_SUBBLOCK ); } /** @brief constructor */
};
dirty = false; /** @brief constructor */
memset( data, 0, BYTE_PER_SUBBLOCK );
}
};
/************************************************************************
* @brief file structure. Default copy constructor, assignment oper, etc
* are in play here, as they are not overridden. Beware that if copies
* of a File object are made, only one user should be closing the pFile.
* oid and fid replicate one another. oid mostly used by index, cache,
* and dictionary. fid mostly used by colop and bulk.
************************************************************************/
struct File /** @brief File structure */
/************************************************************************
* @brief file structure. Default copy constructor, assignment oper, etc
* are in play here, as they are not overridden. Beware that if copies
* of a File object are made, only one user should be closing the pFile.
* oid and fid replicate one another. oid mostly used by index, cache,
* and dictionary. fid mostly used by colop and bulk.
************************************************************************/
struct File /** @brief File structure */
{
OID oid; /** @brief Oid */
FID fid; /** @brief File id */
HWM hwm; /** @brief High water mark */
IDBDataFile* pFile; /** @brief File handle */
uint32_t fPartition; /** @brief Partition for pFile*/
uint16_t fSegment; /** @brief Segment for pFile */
uint16_t fDbRoot; /** @brief DbRoot for pFile */
std::string fSegFileName; /** @brief Current seg file path */
File()
{
OID oid; /** @brief Oid */
FID fid; /** @brief File id */
HWM hwm; /** @brief High water mark */
IDBDataFile* pFile; /** @brief File handle */
uint32_t fPartition; /** @brief Partition for pFile*/
uint16_t fSegment; /** @brief Segment for pFile */
uint16_t fDbRoot; /** @brief DbRoot for pFile */
std::string fSegFileName; /** @brief Current seg file path */
File() { clear(); } /** @brief constructor */
void clear() { pFile = NULL; oid = fid = hwm = 0;
fPartition = fSegment = fDbRoot = 0;
fSegFileName.clear(); }
};
/************************************************************************
* @brief Internal communication block structure
************************************************************************/
struct CommBlock /** @brief Communication Block */
clear(); /** @brief constructor */
}
void clear()
{
File file; /** @brief File structure */
void clear() { file.clear(); }
};
pFile = NULL;
oid = fid = hwm = 0;
fPartition = fSegment = fDbRoot = 0;
fSegFileName.clear();
}
};
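
As a hedged illustration of the File structure (all values hypothetical, and assuming we_type.h plus <cstdint> are included), the helper below fills in the location fields the way the field comments suggest, leaving pFile NULL until the segment file is actually opened. Note the struct's own warning that when copies exist, only one owner should close pFile.

// Sketch only: describe a segment file location without opening it.
WriteEngine::File makeFileDesc(WriteEngine::OID oid, uint16_t dbRoot,
                               uint32_t partition, uint16_t segment)
{
    WriteEngine::File f;       // constructor already calls clear()
    f.oid = f.fid = oid;       // oid and fid replicate one another (per comment)
    f.fDbRoot = dbRoot;
    f.fPartition = partition;
    f.fSegment = segment;
    return f;                  // pFile stays NULL until the file is opened
}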
/************************************************************************
* @brief column structure used to pass data in/out of we_colop functions
************************************************************************/
struct Column /** @brief Column structure */
/************************************************************************
* @brief Internal communication block structure
************************************************************************/
struct CommBlock /** @brief Communication Block */
{
File file; /** @brief File structure */
void clear()
{
int colNo; /** @brief column number */
int colWidth; /** @brief column width */
ColType colType; /** @brief column type (internal use)*/
execplan::CalpontSystemCatalog::ColDataType colDataType; /** @brief column data type (from interface)*/
File dataFile; /** @brief column data file */
int compressionType; /** @brief column compression type*/
Column() : colNo(0), colWidth(0), colType(WR_INT),
colDataType(execplan::CalpontSystemCatalog::INT),
compressionType(idbdatafile::IDBPolicy::useHdfs()?2:0) { }
};
file.clear();
}
};
/************************************************************************
* @brief dictionary related structures (Token struct is defined in
* we_typeext.h to facilitate its use in dbcon and utils/dataconvert).
************************************************************************/
typedef struct offset_ /** @brief Offset structure */
/************************************************************************
* @brief column structure used to pass data in/out of we_colop functions
************************************************************************/
struct Column /** @brief Column structure */
{
int colNo; /** @brief column number */
int colWidth; /** @brief column width */
ColType colType; /** @brief column type (internal use)*/
execplan::CalpontSystemCatalog::ColDataType colDataType; /** @brief column data type (from interface)*/
File dataFile; /** @brief column data file */
int compressionType; /** @brief column compression type*/
Column() : colNo(0), colWidth(0), colType(WR_INT),
colDataType(execplan::CalpontSystemCatalog::INT),
compressionType(idbdatafile::IDBPolicy::useHdfs() ? 2 : 0) { }
};
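
A minimal sketch (not from the source) of populating a Column for the we_colop interfaces; the defaults set by the constructor above already cover the type and compression fields.

// Sketch only: a 4-byte signed integer column description.
WriteEngine::Column makeIntColumn(int colNo)
{
    WriteEngine::Column col;   // defaults: WR_INT / INT, HDFS-dependent compression
    col.colNo = colNo;
    col.colWidth = 4;          // width in bytes
    return col;                // dataFile is filled in when the file is opened
}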
/************************************************************************
* @brief dictionary related structures (Token struct is defined in
* we_typeext.h to facilitate its use in dbcon and utils/dataconvert).
************************************************************************/
typedef struct offset_ /** @brief Offset structure */
{
int hdrLoc; /** @brief offset position in hdr */
uint16_t offset; /** @brief offset in block */
} Offset;
/************************************************************************
* @brief interfaces with DDL/DML
************************************************************************/
typedef struct colTuple_struct /** @brief Column Tuple definition*/
{
boost::any data; /** @brief column value */
} ColTuple;
typedef std::vector<ColTuple> ColTupleList; /** @brief column value list */
struct ColStruct /** @brief Column Interface Struct*/
{
OID dataOid; /** @brief column data file object id */
int colWidth; /** @brief column width */
bool tokenFlag; /** @brief column token flag, must be set to true if it is a token column */
execplan::CalpontSystemCatalog::ColDataType colDataType; /** @brief column data type (for interface)*/
ColType colType; /** @brief column type (internal use for write engine)*/
uint32_t fColPartition; /** @brief Partition for column file */
uint16_t fColSegment; /** @brief Segment for column file*/
uint16_t fColDbRoot; /** @brief DBRoot for column file */
int fCompressionType; /** @brief Compression type for column file */
ColStruct() : dataOid(0), colWidth(0), /** @brief constructor */
tokenFlag(false), colDataType(execplan::CalpontSystemCatalog::INT), colType(WR_INT),
fColPartition(0), fColSegment(0), fColDbRoot(0),
fCompressionType(idbdatafile::IDBPolicy::useHdfs() ? 2 : 0) { }
};
typedef std::vector<ColStruct> ColStructList; /** @brief column struct list */
typedef std::vector<ColTupleList> ColValueList; /** @brief column value list */
typedef std::vector<RID> RIDList; /** @brief RID list */
typedef std::vector<std::string> dictStr;
typedef std::vector<dictStr> DictStrList;
// dictionary
struct DctnryStruct /** @brief Dctnry Interface Struct*/
{
OID dctnryOid; /** @brief dictionary signature file */
OID columnOid; /** @brief corresponding column file */
int colWidth; /** @brief string width for the dictionary column*/
uint32_t fColPartition; /** @brief Partition for column file */
uint16_t fColSegment; /** @brief Segment for column file */
uint16_t fColDbRoot; /** @brief DBRoot for column file */
int fCompressionType; /** @brief Compression type for column file */
DctnryStruct() : dctnryOid(0), columnOid(0), /** @brief constructor */
colWidth(0),
fColPartition(0), fColSegment(0),
fColDbRoot(0), fCompressionType(idbdatafile::IDBPolicy::useHdfs() ? 2 : 0) { }
};
struct DctnryTuple /** @brief Dictionary Tuple struct*/
{
unsigned char* sigValue; /** @brief dictionary signature value*/
int sigSize; /** @brief dictionary signature size */
Token token; /** @brief dictionary token */
bool isNull;
DctnryTuple() { }
~DctnryTuple() { }
};
typedef std::vector<DctnryTuple> DctColTupleList;
typedef std::vector<DctnryStruct> DctnryStructList; /** @brief column struct list */
typedef std::vector<DctColTupleList> DctnryValueList; /** @brief column value list */
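
To show how the DDL/DML-facing lists above fit together, here is a hedged sketch: one ColStruct per column and one ColTupleList of values per column, in matching order. The OID and the value type are hypothetical; the exact boost::any type expected per column type is not documented in this hunk.

// Sketch only: build the parallel struct/value lists for a single-column row.
void buildOneColumnRow(WriteEngine::ColStructList& structs,
                       WriteEngine::ColValueList& values)
{
    WriteEngine::ColStruct cs;   // defaults: INT / WR_INT, tokenFlag false
    cs.dataOid = 3001;           // hypothetical column OID
    cs.colWidth = 4;
    structs.push_back(cs);

    WriteEngine::ColTuple tuple;
    tuple.data = 42;             // boost::any carries the column value
    WriteEngine::ColTupleList oneColumn;
    oneColumn.push_back(tuple);
    values.push_back(oneColumn);
}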
/************************************************************************
* @brief Used by Bulk Load to describe a column
************************************************************************/
struct JobColumn /** @brief Job Column Structure */
{
std::string colName; /** @brief column name */
OID mapOid; /** @brief column OID */
execplan::CalpontSystemCatalog::ColDataType dataType; /** @brief column data type */
ColType weType; /** @brief write engine data type */
std::string typeName; /** @brief data type name */
uint64_t emptyVal; /** @brief default empty value */
int width; /** @brief column width; for a dictionary column, this is "eventually" the token width */
int definedWidth; /** @brief column width as defined in the table, used for non-dictionary strings */
int dctnryWidth; /** @brief dictionary width */
int precision; /** @brief precision of decimal */
int scale; /** @brief scale of decimal */
bool fNotNull; /** @brief not null flag */
BulkFldColRel fFldColRelation; /** @brief type of field/col relation*/
char colType; /** @brief column type, blank is regular, D is dictionary */
int compressionType; /** @brief compression type */
bool autoIncFlag; /** @brief auto increment flag */
DctnryStruct dctnry; /** @brief dictionary structure */
int64_t fMinIntSat; /** @brief For integer type, the min saturation value */
uint64_t fMaxIntSat; /** @brief For integer type, the max saturation value */
double fMinDblSat; /** @brief for float/double, the min saturation value */
double fMaxDblSat; /** @brief for float/double, the max saturation value */
bool fWithDefault; /** @brief With default */
long long fDefaultInt; /** @brief Integer column default */
unsigned long long fDefaultUInt; /** @brief UnsignedInt col default*/
double fDefaultDbl; /** @brief Dbl/Flt column default */
std::string fDefaultChr; /** @brief Char column default */
JobColumn() : mapOid(0), dataType(execplan::CalpontSystemCatalog::INT), weType(WR_INT),
typeName("integer"), emptyVal(0),
width(0), definedWidth(0), dctnryWidth(0),
precision(0), scale(0), fNotNull(false),
fFldColRelation(BULK_FLDCOL_COLUMN_FIELD), colType(' '),
compressionType(0), autoIncFlag(false),
fMinIntSat(0), fMaxIntSat(0),
fMinDblSat(0), fMaxDblSat(0), fWithDefault(false),
fDefaultInt(0), fDefaultUInt(0), fDefaultDbl(0.0)
{ }
};
typedef std::vector<JobColumn> JobColList; /** @brief column value list */
struct JobFieldRef // references field/column in JobTable
{
BulkFldColRel fFldColType; // type of field or column
unsigned fArrayIndex; // index into colList or fIgnoredFields
// in JobTable based on fFldColType.
JobFieldRef( ) : fFldColType(BULK_FLDCOL_COLUMN_FIELD), fArrayIndex(0) { }
JobFieldRef( BulkFldColRel fldColType, unsigned idx ) :
fFldColType( fldColType ), fArrayIndex( idx ) { }
};
typedef std::vector<JobFieldRef> JobFieldRefList;
struct JobTable /** @brief Job Table Structure */
{
std::string tblName; /** @brief table name */
OID mapOid; /** @brief table OID */
std::string loadFileName; /** @brief table load file name */
uint64_t maxErrNum; /** @brief max number of error rows before abort */
JobColList colList; /** @brief list of columns to be loaded; followed by default columns to be loaded */
JobColList fIgnoredFields; /** @brief list of fields in input file to be ignored */
JobFieldRefList fFldRefs; /** @brief Combined list of refs to entries in colList and fIgnoredFields */
JobTable() : mapOid(0), maxErrNum(0) { }
};
typedef std::vector<JobTable> JobTableList;/** @brief table list */
struct Job /** @brief Job Structure */
{
int id; /** @brief job id */
std::string schema; /** @brief database name */
std::string name; /** @brief job name */
std::string desc; /** @brief job description */
std::string userName; /** @brief user name */
JobTableList jobTableList; /** @brief job table list */
std::string createDate; /** @brief job create date */
std::string createTime; /** @brief job create time */
char fDelimiter;
char fEnclosedByChar;
char fEscapeChar;
int numberOfReadBuffers;
unsigned readBufferSize;
unsigned writeBufferSize;
Job() : id(0), fDelimiter('|'),
fEnclosedByChar('\0'), fEscapeChar('\0'),
numberOfReadBuffers(0), readBufferSize(0), writeBufferSize(0) { }
};
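
The bulk-load structures above nest as Job → JobTable → JobColumn/JobFieldRef. The following sketch (all names and ids hypothetical) wires one table with one column into a Job, relying on the defaults the constructors establish, such as the '|' delimiter.

// Sketch only: a one-table, one-column job description.
WriteEngine::Job makeSingleTableJob()
{
    WriteEngine::JobColumn col;
    col.colName = "c1";                       // hypothetical column

    WriteEngine::JobTable tbl;
    tbl.tblName = "t1";                       // hypothetical table
    tbl.loadFileName = "t1.tbl";
    tbl.colList.push_back(col);
    tbl.fFldRefs.push_back(
        WriteEngine::JobFieldRef(WriteEngine::BULK_FLDCOL_COLUMN_FIELD, 0));

    WriteEngine::Job job;
    job.id = 299;                             // hypothetical job id
    job.schema = "test";
    job.jobTableList.push_back(tbl);
    return job;
}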
/************************************************************************
* @brief Cache memory
************************************************************************/
struct CacheBlock /** @brief Cache block structure */
{
uint64_t fbo; /** @brief file fbo */
uint64_t lbid; /** @brief lbid */
bool dirty; /** @brief dirty flag */
int hitCount; /** @brief hit count */
unsigned char* data; /** @brief block buffer */
CacheBlock()
{
int hdrLoc; /** @brief offset position in hdr */
uint16_t offset; /** @brief offset in block */
} Offset;
/************************************************************************
* @brief interfaces with DDL/DML
************************************************************************/
typedef struct colTuple_struct /** @brief Column Tuple definition*/
data = NULL; /** @brief constructor */
clear();
}
void clear()
{
boost::any data; /** @brief column value */
} ColTuple;
fbo = lbid = hitCount = 0;
dirty = false;
typedef std::vector<ColTuple> ColTupleList; /** @brief column value list */
struct ColStruct /** @brief Column Interface Struct*/
if ( data ) memset( data, 0, BYTE_PER_BLOCK);
} /** @brief clear, NOTE: buf must be free by caller first */
void init()
{
OID dataOid; /** @brief column data file object id */
int colWidth; /** @brief column width */
bool tokenFlag; /** @brief column token flag, must be set to true if it is a token column */
execplan::CalpontSystemCatalog::ColDataType colDataType; /** @brief column data type (for interface)*/
ColType colType; /** @brief column type (internal use for write engine)*/
uint32_t fColPartition; /** @brief Partition for column file */
uint16_t fColSegment; /** @brief Segment for column file*/
uint16_t fColDbRoot; /** @brief DBRoot for column file */
int fCompressionType; /** @brief Compression type for column file */
ColStruct() : dataOid(0), colWidth(0), /** @brief constructor */
tokenFlag(false), colDataType(execplan::CalpontSystemCatalog::INT), colType(WR_INT),
fColPartition(0), fColSegment(0), fColDbRoot(0),
fCompressionType(idbdatafile::IDBPolicy::useHdfs()?2:0) { }
};
typedef std::vector<ColStruct> ColStructList; /** @brief column struct list */
typedef std::vector<ColTupleList> ColValueList; /** @brief column value list */
typedef std::vector<RID> RIDList; /** @brief RID list */
typedef std::vector<std::string> dictStr;
typedef std::vector<dictStr> DictStrList;
// dictionary
struct DctnryStruct /** @brief Dctnry Interface Struct*/
data = (unsigned char*)malloc(BYTE_PER_BLOCK);
}
void freeMem()
{
OID dctnryOid; /** @brief dictionary signature file */
OID columnOid; /** @brief corresponding column file */
int colWidth; /** @brief string width for the dictionary column*/
uint32_t fColPartition; /** @brief Partition for column file */
uint16_t fColSegment; /** @brief Segment for column file */
uint16_t fColDbRoot; /** @brief DBRoot for column file */
int fCompressionType; /** @brief Compression type for column file */
DctnryStruct() : dctnryOid(0), columnOid(0), /** @brief constructor */
colWidth(0),
fColPartition(0), fColSegment(0),
fColDbRoot(0), fCompressionType(idbdatafile::IDBPolicy::useHdfs()?2:0) { }
};
if ( data ) free( data );
}
};
struct DctnryTuple /** @brief Dictionary Tuple struct*/
struct BlockBuffer /** @brief Block buffer */
{
CommBlock cb; /** @brief Communication block structure */
CacheBlock block; /** @brief Cache block structure */
CacheListType listType; /** @brief List number, 0 - free, 1 - LRU, 2 - write */
BlockBuffer()
{
unsigned char *sigValue; /** @brief dictionary signature value*/
int sigSize; /** @brief dictionary signature size */
Token token; /** @brief dictionary token */
bool isNull;
DctnryTuple() { }
~DctnryTuple() { }
};
typedef std::vector<DctnryTuple> DctColTupleList;
typedef std::vector<DctnryStruct> DctnryStructList; /** @brief column struct list */
typedef std::vector<DctColTupleList> DctnryValueList; /** @brief column value list */
/************************************************************************
* @brief Used by Bulk Load to describe a column
************************************************************************/
struct JobColumn /** @brief Job Column Structure */
clear(); /** @brief constructor */
}
void init()
{
std::string colName; /** @brief column name */
OID mapOid; /** @brief column OID */
execplan::CalpontSystemCatalog::ColDataType dataType; /** @brief column data type */
ColType weType; /** @brief write engine data type */
std::string typeName; /** @brief data type name */
uint64_t emptyVal; /** @brief default empty value */
int width; /** @brief column width; for a dictionary column, this is "eventually" the token width */
int definedWidth; /** @brief column width as defined in the table, used for non-dictionary strings */
int dctnryWidth; /** @brief dictionary width */
int precision; /** @brief precision of decimal */
int scale; /** @brief scale of decimal */
bool fNotNull; /** @brief not null flag */
BulkFldColRel fFldColRelation; /** @brief type of field/col relation*/
char colType; /** @brief column type, blank is regular, D is dictionary */
int compressionType; /** @brief compression type */
bool autoIncFlag; /** @brief auto increment flag */
DctnryStruct dctnry; /** @brief dictionary structure */
int64_t fMinIntSat; /** @brief For integer type, the min saturation value */
uint64_t fMaxIntSat; /** @brief For integer type, the max saturation value */
double fMinDblSat; /** @brief for float/double, the min saturation value */
double fMaxDblSat; /** @brief for float/double, the max saturation value */
bool fWithDefault; /** @brief With default */
long long fDefaultInt; /** @brief Integer column default */
unsigned long long fDefaultUInt; /** @brief UnsignedInt col default*/
double fDefaultDbl; /** @brief Dbl/Flt column default */
std::string fDefaultChr; /** @brief Char column default */
JobColumn() : mapOid(0), dataType(execplan::CalpontSystemCatalog::INT), weType(WR_INT),
typeName("integer"), emptyVal(0),
width(0), definedWidth(0), dctnryWidth(0),
precision(0), scale(0), fNotNull(false),
fFldColRelation(BULK_FLDCOL_COLUMN_FIELD), colType(' '),
compressionType(0),autoIncFlag(false),
fMinIntSat(0), fMaxIntSat(0),
fMinDblSat(0), fMaxDblSat(0), fWithDefault(false),
fDefaultInt(0), fDefaultUInt(0), fDefaultDbl(0.0)
{ }
};
typedef std::vector<JobColumn> JobColList; /** @brief column value list */
struct JobFieldRef // references field/column in JobTable
block.init();
}
void freeMem()
{
BulkFldColRel fFldColType; // type of field or column
unsigned fArrayIndex; // index into colList or fIgnoredFields
// in JobTable based on fFldColType.
JobFieldRef( ) : fFldColType(BULK_FLDCOL_COLUMN_FIELD), fArrayIndex(0) { }
JobFieldRef( BulkFldColRel fldColType, unsigned idx ) :
fFldColType( fldColType ), fArrayIndex( idx ) { }
};
typedef std::vector<JobFieldRef> JobFieldRefList;
struct JobTable /** @brief Job Table Structure */
block.freeMem();
}
void clear()
{
std::string tblName; /** @brief table name */
OID mapOid; /** @brief table OID */
std::string loadFileName; /** @brief table load file name */
uint64_t maxErrNum; /** @brief max number of error rows before abort */
JobColList colList; /** @brief list of columns to be loaded; followed by default columns to be loaded */
JobColList fIgnoredFields; /** @brief list of fields in input file to be ignored */
JobFieldRefList fFldRefs; /** @brief Combined list of refs to entries in colList and fIgnoredFields */
JobTable() : mapOid(0), maxErrNum(0) { }
};
cb.clear();
block.clear();
listType = FREE_LIST;
}
};
typedef std::vector<JobTable> JobTableList;/** @brief table list */
struct Job /** @brief Job Structure */
struct CacheControl /** @brief Cache control structure */
{
int totalBlock; /** @brief The total number of allocated blocks */
int pctFree; /** @brief The percentage of free blocks when some blocks must be aged out */
int checkInterval; /** @brief A check point interval in seconds */
CacheControl()
{
int id; /** @brief job id */
std::string schema; /** @brief database name */
std::string name; /** @brief job name */
std::string desc; /** @brief job description */
std::string userName; /** @brief user name */
JobTableList jobTableList; /** @brief job table list */
std::string createDate; /** @brief job create date */
std::string createTime; /** @brief job create time */
totalBlock = pctFree = checkInterval = 0; /** @brief constructor */
}
};
char fDelimiter;
char fEnclosedByChar;
char fEscapeChar;
int numberOfReadBuffers;
unsigned readBufferSize;
unsigned writeBufferSize;
Job() : id(0), fDelimiter('|'),
fEnclosedByChar('\0'), fEscapeChar('\0'),
numberOfReadBuffers(0), readBufferSize(0), writeBufferSize(0) { }
};
/************************************************************************
* @brief Bulk parse meta data describing data in a read buffer.
* An offset of COLPOSPAIR_NULL_TOKEN_OFFSET represents a null token.
************************************************************************/
struct ColPosPair /** @brief Column position pair structure */
{
int start; /** @brief start position */
int offset; /** @brief length of token*/
};
/************************************************************************
* @brief Cache memory
************************************************************************/
struct CacheBlock /** @brief Cache block structure */
/************************************************************************
* @brief SecondaryShutdown used to terminate a thread when it sees that the
* JobStatus flag has been set to EXIT_FAILURE (by another thread).
************************************************************************/
class SecondaryShutdownException : public std::runtime_error
{
public:
SecondaryShutdownException(const std::string& msg) :
std::runtime_error(msg) { }
};
/************************************************************************
* @brief Generic exception class used to store exception string and error
* code for a writeengine error.
************************************************************************/
class WeException : public std::runtime_error
{
public:
WeException(const std::string& msg, int err = 0) :
std::runtime_error(msg), fErrorCode(err) { }
void errorCode(int code)
{
uint64_t fbo; /** @brief file fbo */
uint64_t lbid; /** @brief lbid */
bool dirty; /** @brief dirty flag */
int hitCount; /** @brief hit count */
unsigned char* data; /** @brief block buffer */
CacheBlock() { data = NULL; clear(); }/** @brief constructor */
void clear() { fbo = lbid = hitCount = 0;
dirty = false;
if( data ) memset( data, 0, BYTE_PER_BLOCK); } /** @brief clear, NOTE: buf must be free by caller first */
void init() { data = (unsigned char*)malloc(BYTE_PER_BLOCK); }
void freeMem() { if( data ) free( data ); }
};
struct BlockBuffer /** @brief Block buffer */
fErrorCode = code;
}
int errorCode() const
{
CommBlock cb; /** @brief Communication block structure */
CacheBlock block; /** @brief Cache block structure */
CacheListType listType; /** @brief List number, 0 - free, 1 - LRU, 2 - write */
BlockBuffer() { clear(); } /** @brief constructor */
void init() { block.init(); }
void freeMem() { block.freeMem(); }
void clear() { cb.clear(); block.clear(); listType = FREE_LIST; }
};
struct CacheControl /** @brief Cache control structure */
{
int totalBlock; /** @brief The total number of allocated blocks */
int pctFree; /** @brief The percentage of free blocks when some blocks must be aged out */
int checkInterval; /** @brief A check point interval in seconds */
CacheControl() { totalBlock = pctFree = checkInterval = 0; } /** @brief constructor */
};
/************************************************************************
* @brief Bulk parse meta data describing data in a read buffer.
* An offset of COLPOSPAIR_NULL_TOKEN_OFFSET represents a null token.
************************************************************************/
struct ColPosPair /** @brief Column position pair structure */
{
int start; /** @brief start position */
int offset; /** @brief length of token*/
};
/************************************************************************
* @brief SecondaryShutdown used to terminate a thread when it sees that the
* JobStatus flag has been set to EXIT_FAILURE (by another thread).
************************************************************************/
class SecondaryShutdownException : public std::runtime_error
{
public:
SecondaryShutdownException(const std::string& msg) :
std::runtime_error(msg) { }
};
/************************************************************************
* @brief Generic exception class used to store exception string and error
* code for a writeengine error.
************************************************************************/
class WeException : public std::runtime_error
{
public:
WeException(const std::string& msg, int err=0) :
std::runtime_error(msg), fErrorCode(err) { }
void errorCode(int code) { fErrorCode = code; }
int errorCode() const { return fErrorCode; }
private:
int fErrorCode;
};
return fErrorCode;
}
private:
int fErrorCode;
};
} //end of namespace
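
As a hedged illustration of the two exception classes defined above: SecondaryShutdownException simply unwinds a worker thread, while WeException carries an error code alongside the message. The code below is an added sketch, not part of the commit.

// Sketch only: raise and report a write-engine error.
#include <iostream>
void reportFailure()
{
    try
    {
        throw WriteEngine::WeException("write failed", 1234);   // illustrative error code
    }
    catch (const WriteEngine::WeException& ex)
    {
        std::cerr << ex.what() << " (rc=" << ex.errorCode() << ")" << std::endl;
    }
}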

View File

@ -34,25 +34,26 @@
/** Namespace WriteEngine */
namespace WriteEngine
{
/************************************************************************
* Type definitions
************************************************************************/
typedef uint64_t RID; // Row ID
/************************************************************************
* Type definitions
************************************************************************/
typedef uint64_t RID; // Row ID
/************************************************************************
* Dictionary related structure
************************************************************************/
struct Token {
uint64_t op : 10; // ordinal position within a block
uint64_t fbo : 36; // file block number
uint64_t bc : 18; // block count
Token() // constructor, set to null value
{
op = 0x3FE;
fbo = 0xFFFFFFFFFLL;
bc = 0x3FFFF;
}
};
/************************************************************************
* Dictionary related structure
************************************************************************/
struct Token
{
uint64_t op : 10; // ordinal position within a block
uint64_t fbo : 36; // file block number
uint64_t bc : 18; // block count
Token() // constructor, set to null value
{
op = 0x3FE;
fbo = 0xFFFFFFFFFLL;
bc = 0x3FFFF;
}
};
} //end of namespace
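
For reference, the three bit-fields in Token pack into a single 64-bit word (10 + 36 + 18 bits), and a default-constructed Token holds the sentinel values set by the constructor above. The check below is an added sketch; the sizeof assertion assumes the usual bit-field packing.

// Sketch only: detect the "null" Token produced by the default constructor.
#include <cassert>
void tokenNullCheck()
{
    WriteEngine::Token t;
    assert(sizeof(t) == 8);                    // 10 + 36 + 18 bits in one word (assumption)
    bool isNull = (t.op == 0x3FE) &&
                  (t.fbo == 0xFFFFFFFFFLL) &&
                  (t.bc == 0x3FFFF);
    assert(isNull);
}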