1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-05 16:15:50 +03:00

MCOL-423 Fixes to I_S tables

Fixes the following:

* Compression ratio calculation was incorrect
* Possible issues due to system catalog thread ID usage
* Compressed file size data count was leaking many FDs when the table
  wasn't compressed
* Compressed file size data count was allocating random large amounts
  of RAM and then leaking it when the table wasn't compressed
This commit is contained in:
Andrew Hutchings
2016-11-29 10:45:38 +00:00
parent 08ae2dac1a
commit f586f3b46a
5 changed files with 39 additions and 5 deletions

View File

@@ -42,7 +42,7 @@ END //
 CREATE PROCEDURE compression_ratio()
 BEGIN
-SELECT CONCAT(((1 - (sum(data_size) / sum(compressed_data_size))) * 100), '%') COMPRESSION_RATIO FROM INFORMATION_SCHEMA.COLUMNSTORE_EXTENTS ce
+SELECT CONCAT(((sum(compressed_data_size) / sum(data_size)) * 100), '%') COMPRESSION_RATIO FROM INFORMATION_SCHEMA.COLUMNSTORE_EXTENTS ce
 JOIN INFORMATION_SCHEMA.COLUMNSTORE_FILES cf ON ce.object_id = cf.object_id
 WHERE compressed_data_size IS NOT NULL;
 END //

View File

@@ -60,7 +60,7 @@ static int is_columnstore_columns_fill(THD *thd, TABLE_LIST *tables, COND *cond)
 TABLE *table = tables->table;
 boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr =
-execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(0);
+execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id));
 const std::vector< std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> > catalog_tables
 = systemCatalogPtr->getTables();

View File

@@ -48,7 +48,7 @@ static int is_columnstore_tables_fill(THD *thd, TABLE_LIST *tables, COND *cond)
 TABLE *table = tables->table;
 boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr =
-execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(0);
+execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id));
 const std::vector< std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> > catalog_tables
 = systemCatalogPtr->getTables();

View File

@@ -159,7 +159,7 @@ size_t readFillBuffer(
 off64_t PosixFileSystem::compressedSize(const char *path) const
 {
-IDBDataFile *pFile = 0;
+IDBDataFile *pFile = NULL;
 size_t nBytes;
 off64_t dataSize = 0;
@@ -178,6 +178,14 @@ off64_t PosixFileSystem::compressedSize(const char *path) const
 nBytes = readFillBuffer( pFile,hdr1,compress::IDBCompressInterface::HDR_BUF_LEN);
 if ( nBytes != compress::IDBCompressInterface::HDR_BUF_LEN )
 {
+delete pFile;
+return -1;
+}
+// Verify we are a compressed file
+if (decompressor.verifyHdr(hdr1) < 0)
+{
+delete pFile;
 return -1;
 }
@@ -186,6 +194,8 @@ off64_t PosixFileSystem::compressedSize(const char *path) const
 nBytes = readFillBuffer( pFile,hdr2,ptrSecSize);
 if ( (int64_t)nBytes != ptrSecSize )
 {
+delete[] hdr2;
+delete pFile;
 return -1;
 }
@@ -194,17 +204,24 @@ off64_t PosixFileSystem::compressedSize(const char *path) const
 delete[] hdr2;
 if (rc != 0)
 {
+delete pFile;
 return -1;
 }
 unsigned k = chunkPtrs.size();
 // last header's offset + length will be the data bytes
+if (k < 1)
+{
+delete pFile;
+return -1;
+}
 dataSize = chunkPtrs[k-1].first + chunkPtrs[k-1].second;
 delete pFile;
 return dataSize;
 }
 catch (...)
 {
+delete pFile;
 return -1;
 }
 }

View File

@@ -151,7 +151,7 @@ size_t readFillBuffer(
 off64_t HdfsFileSystem::compressedSize(const char *path) const
 {
-IDBDataFile *pFile = 0;
+IDBDataFile *pFile = NULL;
 size_t nBytes;
 off64_t dataSize = 0;
@@ -170,6 +170,14 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const
 nBytes = readFillBuffer( pFile,hdr1,compress::IDBCompressInterface::HDR_BUF_LEN);
 if ( nBytes != compress::IDBCompressInterface::HDR_BUF_LEN )
 {
+delete pFile;
+return -1;
+}
+// Verify we are a compressed file
+if (decompressor.verifyHdr(hdr1) < 0)
+{
+delete pFile;
 return -1;
 }
@@ -178,6 +186,8 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const
 nBytes = readFillBuffer( pFile,hdr2,ptrSecSize);
 if ( (int64_t)nBytes != ptrSecSize )
 {
+delete[] hdr2;
+delete pFile;
 return -1;
 }
@@ -186,17 +196,24 @@ off64_t HdfsFileSystem::compressedSize(const char *path) const
 delete[] hdr2;
 if (rc != 0)
 {
+delete pFile;
 return -1;
 }
 unsigned k = chunkPtrs.size();
 // last header's offset + length will be the data bytes
+if (k < 1)
+{
+delete pFile;
+return -1;
+}
 dataSize = chunkPtrs[k-1].first + chunkPtrs[k-1].second;
 delete pFile;
 return dataSize;
 }
 catch (...)
 {
+delete pFile;
 return -1;
 }
 }