1
0
mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-29 08:21:15 +03:00

MCOL-987 Add LZ4 compression.

* Adds CompressInterfaceLZ4 which uses LZ4 API for compress/uncompress.
* Adds CMake machinery to search for LZ4 on the build host.
* All methods that use static data and do not modify any internal state become `static`,
  so they can be used without creating a specific object. This is possible because
  the header specification has not been modified: we still use two sections in the header,
  the first with file metadata and the second with pointers to the compressed chunks.
* The methods `compress`, `uncompress`, `maxCompressedSize`, and `getUncompressedSize` become
  pure virtual, so they can be overridden for other compression algorithms.
* Adds the method `getChunkMagicNumber`, so the chunk magic number can be verified
  for each compression algorithm.
* Renames "s/IDBCompressInterface/CompressInterface/g" according to requirement.
This commit is contained in:
Denis Khalikov
2021-04-01 17:26:38 +03:00
parent dd12bd3cd0
commit cc1c3629c5
45 changed files with 1311 additions and 549 deletions

View File

@ -108,7 +108,7 @@ int ColumnInfoCompressed::closeColumnFile(bool bCompletingExtent, bool bAbort)
//------------------------------------------------------------------------------
int ColumnInfoCompressed::setupInitialColumnFile( HWM oldHwm, HWM hwm )
{
char hdr[ compress::IDBCompressInterface::HDR_BUF_LEN * 2 ];
char hdr[ compress::CompressInterface::HDR_BUF_LEN * 2 ];
RETURN_ON_ERROR( colOp->readHeaders(curCol.dataFile.pFile, hdr) );
// Initialize the output buffer manager for the column.
@ -129,10 +129,9 @@ int ColumnInfoCompressed::setupInitialColumnFile( HWM oldHwm, HWM hwm )
fColBufferMgr = mgr;
IDBCompressInterface compressor;
int abbrevFlag =
( compressor.getBlockCount(hdr) ==
uint64_t(INITIAL_EXTENT_ROWS_TO_DISK * column.width / BYTE_PER_BLOCK) );
int abbrevFlag = (compress::CompressInterface::getBlockCount(hdr) ==
uint64_t(INITIAL_EXTENT_ROWS_TO_DISK * column.width /
BYTE_PER_BLOCK));
setFileSize( hwm, abbrevFlag );
// See if dealing with abbreviated extent that will need expanding.
@ -324,9 +323,9 @@ int ColumnInfoCompressed::truncateDctnryStore(
return rc;
}
char controlHdr[ IDBCompressInterface::HDR_BUF_LEN ];
char controlHdr[ CompressInterface::HDR_BUF_LEN ];
rc = fTruncateDctnryFileOp.readFile( dFile,
(unsigned char*)controlHdr, IDBCompressInterface::HDR_BUF_LEN);
(unsigned char*)controlHdr, CompressInterface::HDR_BUF_LEN);
if (rc != NO_ERROR)
{
@ -345,8 +344,7 @@ int ColumnInfoCompressed::truncateDctnryStore(
return rc;
}
IDBCompressInterface compressor;
int rc1 = compressor.verifyHdr( controlHdr );
int rc1 = compress::CompressInterface::verifyHdr(controlHdr);
if (rc1 != 0)
{
@ -372,7 +370,8 @@ int ColumnInfoCompressed::truncateDctnryStore(
// actually grow the file (something we don't want to do), because we have
// not yet reserved a full extent (on disk) for this dictionary store file.
const int PSEUDO_COL_WIDTH = 8;
uint64_t numBlocks = compressor.getBlockCount( controlHdr );
uint64_t numBlocks =
compress::CompressInterface::getBlockCount(controlHdr);
if ( numBlocks == uint64_t
(INITIAL_EXTENT_ROWS_TO_DISK * PSEUDO_COL_WIDTH / BYTE_PER_BLOCK) )
@ -390,8 +389,8 @@ int ColumnInfoCompressed::truncateDctnryStore(
return NO_ERROR;
}
uint64_t hdrSize = compressor.getHdrSize(controlHdr);
uint64_t ptrHdrSize = hdrSize - IDBCompressInterface::HDR_BUF_LEN;
uint64_t hdrSize = compress::CompressInterface::getHdrSize(controlHdr);
uint64_t ptrHdrSize = hdrSize - CompressInterface::HDR_BUF_LEN;
char* pointerHdr = new char[ptrHdrSize];
rc = fTruncateDctnryFileOp.readFile(dFile,
@ -416,7 +415,8 @@ int ColumnInfoCompressed::truncateDctnryStore(
}
CompChunkPtrList chunkPtrs;
rc1 = compressor.getPtrList( pointerHdr, ptrHdrSize, chunkPtrs );
rc1 = compress::CompressInterface::getPtrList(pointerHdr, ptrHdrSize,
chunkPtrs);
delete[] pointerHdr;
if (rc1 != 0)