diff --git a/dbcon/mysql/columnstore_info.sql b/dbcon/mysql/columnstore_info.sql index b545ce647..d900c99b0 100644 --- a/dbcon/mysql/columnstore_info.sql +++ b/dbcon/mysql/columnstore_info.sql @@ -42,7 +42,7 @@ END // CREATE PROCEDURE compression_ratio() BEGIN -SELECT CONCAT(((1 - (sum(data_size) / sum(compressed_data_size))) * 100), '%') COMPRESSION_RATIO FROM INFORMATION_SCHEMA.COLUMNSTORE_EXTENTS ce +SELECT CONCAT(((sum(compressed_data_size) / sum(data_size)) * 100), '%') COMPRESSION_RATIO FROM INFORMATION_SCHEMA.COLUMNSTORE_EXTENTS ce JOIN INFORMATION_SCHEMA.COLUMNSTORE_FILES cf ON ce.object_id = cf.object_id WHERE compressed_data_size IS NOT NULL; END // diff --git a/dbcon/mysql/is_columnstore_columns.cpp b/dbcon/mysql/is_columnstore_columns.cpp index d97ca8823..ea9e80aa3 100644 --- a/dbcon/mysql/is_columnstore_columns.cpp +++ b/dbcon/mysql/is_columnstore_columns.cpp @@ -60,7 +60,7 @@ static int is_columnstore_columns_fill(THD *thd, TABLE_LIST *tables, COND *cond) TABLE *table = tables->table; boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr = - execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(0); + execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id)); const std::vector< std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> > catalog_tables = systemCatalogPtr->getTables(); diff --git a/dbcon/mysql/is_columnstore_tables.cpp b/dbcon/mysql/is_columnstore_tables.cpp index e38c8d5e7..5b3bc7fc6 100644 --- a/dbcon/mysql/is_columnstore_tables.cpp +++ b/dbcon/mysql/is_columnstore_tables.cpp @@ -48,7 +48,7 @@ static int is_columnstore_tables_fill(THD *thd, TABLE_LIST *tables, COND *cond) TABLE *table = tables->table; boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr = - execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(0); + execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id)); const std::vector< std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> > catalog_tables = systemCatalogPtr->getTables(); diff --git 
a/utils/idbdatafile/PosixFileSystem.cpp b/utils/idbdatafile/PosixFileSystem.cpp index 59685c1f9..01d542086 100644 --- a/utils/idbdatafile/PosixFileSystem.cpp +++ b/utils/idbdatafile/PosixFileSystem.cpp @@ -159,7 +159,7 @@ size_t readFillBuffer( off64_t PosixFileSystem::compressedSize(const char *path) const { - IDBDataFile *pFile = 0; + IDBDataFile *pFile = NULL; size_t nBytes; off64_t dataSize = 0; @@ -178,6 +178,14 @@ off64_t PosixFileSystem::compressedSize(const char *path) const nBytes = readFillBuffer( pFile,hdr1,compress::IDBCompressInterface::HDR_BUF_LEN); if ( nBytes != compress::IDBCompressInterface::HDR_BUF_LEN ) { + delete pFile; + return -1; + } + + // Verify we are a compressed file + if (decompressor.verifyHdr(hdr1) < 0) + { + delete pFile; return -1; } @@ -186,6 +194,8 @@ off64_t PosixFileSystem::compressedSize(const char *path) const nBytes = readFillBuffer( pFile,hdr2,ptrSecSize); if ( (int64_t)nBytes != ptrSecSize ) { + delete[] hdr2; + delete pFile; return -1; } @@ -194,17 +204,24 @@ off64_t PosixFileSystem::compressedSize(const char *path) const delete[] hdr2; if (rc != 0) { + delete pFile; return -1; } unsigned k = chunkPtrs.size(); // last header's offset + length will be the data bytes + if (k < 1) + { + delete pFile; + return -1; + } dataSize = chunkPtrs[k-1].first + chunkPtrs[k-1].second; delete pFile; return dataSize; } catch (...) 
{ + delete pFile; return -1; } } diff --git a/utils/idbhdfs/hdfs-shared/HdfsFileSystem.cpp b/utils/idbhdfs/hdfs-shared/HdfsFileSystem.cpp index 1e5626147..a3a874b38 100644 --- a/utils/idbhdfs/hdfs-shared/HdfsFileSystem.cpp +++ b/utils/idbhdfs/hdfs-shared/HdfsFileSystem.cpp @@ -151,7 +151,7 @@ size_t readFillBuffer( off64_t HdfsFileSystem::compressedSize(const char *path) const { - IDBDataFile *pFile = 0; + IDBDataFile *pFile = NULL; size_t nBytes; off64_t dataSize = 0; @@ -170,6 +170,14 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const nBytes = readFillBuffer( pFile,hdr1,compress::IDBCompressInterface::HDR_BUF_LEN); if ( nBytes != compress::IDBCompressInterface::HDR_BUF_LEN ) { + delete pFile; + return -1; + } + + // Verify we are a compressed file + if (decompressor.verifyHdr(hdr1) < 0) + { + delete pFile; return -1; } @@ -178,6 +186,8 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const nBytes = readFillBuffer( pFile,hdr2,ptrSecSize); if ( (int64_t)nBytes != ptrSecSize ) { + delete[] hdr2; + delete pFile; return -1; } @@ -186,17 +196,24 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const delete[] hdr2; if (rc != 0) { + delete pFile; return -1; } unsigned k = chunkPtrs.size(); // last header's offset + length will be the data bytes + if (k < 1) + { + delete pFile; + return -1; + } dataSize = chunkPtrs[k-1].first + chunkPtrs[k-1].second; delete pFile; return dataSize; } catch (...) { + delete pFile; return -1; } }