mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-07-30 19:23:07 +03:00

feat(): propagated changes into STLPoolAllocator and friends

Author: drrtuy
Date:   2025-01-10 18:53:49 +00:00
Parent: a6de8ec1ac
Commit: 90b4322470
18 changed files with 516 additions and 129 deletions

View File

@ -21,21 +21,13 @@
******************************************************************************************/
// This is one of the first files we compile, check the compiler...
#if defined(__GNUC__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
#error "This is a very old GCC, and it's probably not going to work."
#endif
#else
#error "This compiler is not known and it's probably not going to work."
#endif
#include <stdint.h>
#include <iostream>
#include <memory>
#define FIXEDALLOCATOR_DLLEXPORT
#include "fixedallocator.h"
#undef FIXEDALLOCATOR_DLLEXPORT
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>
using namespace std;
@ -75,15 +67,24 @@ void FixedAllocator::setAllocSize(uint allocSize)
void FixedAllocator::newBlock()
{
std::shared_ptr<uint8_t[]> next;
// boost::shared_ptr<FixedAllocatorBufType> next;
capacityRemaining = elementCount * elementSize;
if (!tmpSpace || mem.size() == 0)
{
next.reset(new uint8_t[elementCount * elementSize]);
mem.push_back(next);
nextAlloc = next.get();
if (alloc)
{
mem.emplace_back(boost::allocate_shared<FixedAllocatorBufType>(*alloc, elementCount * elementSize));
}
else
{
mem.emplace_back(boost::make_shared<FixedAllocatorBufType>(elementCount * elementSize));
}
// next.reset(new uint8_t[elementCount * elementSize]);
// mem.push_back(next);
// nextAlloc = next.get();
nextAlloc = mem.back().get();
}
else
{

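The hunk above swaps a raw new uint8_t[] for Boost's array forms of allocate_shared/make_shared. As a minimal, self-contained illustration of that API (not part of the commit; TrackingAllocator is a hypothetical stand-in for allocators::CountingAllocator, and blockSize stands in for elementCount * elementSize):

#include <atomic>
#include <cstddef>
#include <iostream>
#include <new>
#include <stdint.h>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>

static std::atomic<int64_t> bytesInUse{0};

// Standard-conforming allocator that only tracks how many bytes are live.
template <class T>
struct TrackingAllocator
{
  using value_type = T;
  TrackingAllocator() = default;
  template <class U>
  TrackingAllocator(const TrackingAllocator<U>&)
  {
  }
  T* allocate(std::size_t n)
  {
    bytesInUse += static_cast<int64_t>(n * sizeof(T));
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }
  void deallocate(T* p, std::size_t n)
  {
    bytesInUse -= static_cast<int64_t>(n * sizeof(T));
    ::operator delete(p);
  }
};
template <class T, class U>
bool operator==(const TrackingAllocator<T>&, const TrackingAllocator<U>&) { return true; }
template <class T, class U>
bool operator!=(const TrackingAllocator<T>&, const TrackingAllocator<U>&) { return false; }

int main()
{
  const std::size_t blockSize = 8192;

  // Accounted path (alloc is set): control block and byte array both come from
  // the supplied allocator, so the whole block shows up in bytesInUse.
  TrackingAllocator<uint8_t> alloc;
  boost::shared_ptr<uint8_t[]> counted = boost::allocate_shared<uint8_t[]>(alloc, blockSize);

  // Unaccounted fallback (no allocator was supplied).
  boost::shared_ptr<uint8_t[]> plain = boost::make_shared<uint8_t[]>(blockSize);

  std::cout << "bytes tracked: " << bytesInUse.load() << std::endl;  // >= blockSize
  return 0;
}

The useful property of the allocate_shared path is that the shared_ptr control block and the byte array share one allocation drawn from the supplied allocator, so the accounting sees the block's full footprint.
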
View File

@ -34,16 +34,23 @@
#include <stdint.h>
#include <optional>
#include <vector>
#include <limits>
#include <unistd.h>
#include <atomic>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include "countingallocator.h"
#include "spinlock.h"
#define EXPORT
namespace utils
{
using FixedAllocatorBufIntegralType = uint8_t;
using FixedAllocatorBufType = FixedAllocatorBufIntegralType[];
class FixedAllocator
{
public:
@ -60,7 +67,8 @@ class FixedAllocator
, lock(false)
{
}
EXPORT explicit FixedAllocator(unsigned long allocSize, bool isTmpSpace = false,
EXPORT explicit FixedAllocator(allocators::CountingAllocator<FixedAllocatorBufType> alloc, unsigned long allocSize, bool isTmpSpace = false,
unsigned long numElements = DEFAULT_NUM_ELEMENTS)
: capacityRemaining(0)
, elementCount(numElements)
@ -70,8 +78,10 @@ class FixedAllocator
, nextAlloc(0)
, useLock(false)
, lock(false)
, alloc(alloc)
{
}
EXPORT FixedAllocator(const FixedAllocator&);
EXPORT FixedAllocator& operator=(const FixedAllocator&);
virtual ~FixedAllocator()
@ -88,20 +98,21 @@ class FixedAllocator
EXPORT void deallocateAll(); // drops all memory in use
EXPORT uint64_t getMemUsage() const;
void setUseLock(bool);
void setAllocSize(uint);
void setAllocSize(uint32_t);
private:
void newBlock();
std::vector<std::shared_ptr<uint8_t[]>> mem;
std::vector<boost::shared_ptr<FixedAllocatorBufType>> mem;
unsigned long capacityRemaining;
uint64_t elementCount;
unsigned long elementSize;
uint64_t currentlyStored;
bool tmpSpace;
uint8_t* nextAlloc;
FixedAllocatorBufIntegralType* nextAlloc;
bool useLock;
std::atomic<bool> lock;
std::optional<allocators::CountingAllocator<FixedAllocatorBufType>> alloc {};
};
inline void* FixedAllocator::allocate()

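A usage sketch for the extended interface (not part of the commit; buildKeyStore and the 32-byte slot size are hypothetical, and the ColumnStore headers are assumed to be on the include path):

#include <stdint.h>
#include "countingallocator.h"
#include "fixedallocator.h"
#include "resourcemanager.h"

void buildKeyStore(joblist::ResourceManager* rm)
{
  // The CountingAllocator handed out by the ResourceManager is copied into the
  // FixedAllocator's std::optional<...> alloc member; every block newBlock()
  // grabs afterwards is charged against the ResourceManager's budget.
  auto counting = rm->getAllocator<utils::FixedAllocatorBufType>();
  utils::FixedAllocator keys(counting, /*allocSize=*/32);

  void* slot = keys.allocate();       // one 32-byte slot carved from a counted block
  (void)slot;
  uint64_t used = keys.getMemUsage(); // memory usage as reported by the allocator
  (void)used;
  keys.deallocateAll();               // drops every block, releasing the accounting
}
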
View File

@ -23,7 +23,8 @@
#include <iostream>
//#define NDEBUG
#include <cassert>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>
#include "poolallocator.h"
@ -37,6 +38,7 @@ PoolAllocator& PoolAllocator::operator=(const PoolAllocator& v)
allocSize = v.allocSize;
tmpSpace = v.tmpSpace;
useLock = v.useLock;
alloc = v.alloc;
deallocateAll();
return *this;
}
@ -46,21 +48,29 @@ void PoolAllocator::deallocateAll()
capacityRemaining = 0;
nextAlloc = NULL;
memUsage = 0;
// WIP double check the space is cleaned up.
mem.clear();
oob.clear();
}
void PoolAllocator::newBlock()
{
std::shared_ptr<PoolAllocatorBufType[]> next;
// boost::shared_ptr<PoolAllocatorBufType[]> next;
capacityRemaining = allocSize;
if (!tmpSpace || mem.size() == 0)
{
next.reset(new PoolAllocatorBufType[allocSize]);
mem.push_back(next);
nextAlloc = next.get();
if (alloc)
{
mem.emplace_back(boost::allocate_shared<PoolAllocatorBufType>(*alloc, allocSize));
}
else
{
mem.emplace_back(boost::make_shared<PoolAllocatorBufType>(allocSize));
}
// mem.push_back(next);
nextAlloc = mem.back().get();
}
else
nextAlloc = mem.front().get();
@ -71,7 +81,14 @@ void* PoolAllocator::allocOOB(uint64_t size)
OOBMemInfo memInfo;
memUsage += size;
memInfo.mem.reset(new PoolAllocatorBufType[size]);
if (alloc)
{
memInfo.mem = boost::allocate_shared<PoolAllocatorBufType>(*alloc, size);
}
else
{
memInfo.mem = boost::make_shared<PoolAllocatorBufType>(size);
}
memInfo.size = size;
void* ret = (void*)memInfo.mem.get();
oob[ret] = memInfo;

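The if (alloc) / else dispatch above now appears in newBlock(), allocOOB() and fixedallocator.cpp's newBlock(). A hypothetical helper (not in the commit) shows the pattern in isolation:

#include <cstddef>
#include <optional>
#include <stdint.h>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>

// Charge the buffer to the memory accountant when one was provided, otherwise
// fall back to a plain, unaccounted allocation.
template <class Alloc>
boost::shared_ptr<uint8_t[]> makeBuffer(const std::optional<Alloc>& alloc, std::size_t bytes)
{
  if (alloc)
    return boost::allocate_shared<uint8_t[]>(*alloc, bytes);
  return boost::make_shared<uint8_t[]>(bytes);
}

With such a helper, the call sites above would reduce to mem.emplace_back(makeBuffer(alloc, allocSize)) and memInfo.mem = makeBuffer(alloc, size); the commit keeps the branches inline instead.
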
View File

@ -27,17 +27,21 @@
#include <unistd.h>
#include <stdint.h>
#include <optional>
#include <vector>
#include <map>
#include <memory>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <atomic>
#include "countingallocator.h"
namespace utils
{
using PoolAllocatorBufType = uint8_t;
using PoolAllocatorBufIntegralType = uint8_t;
using PoolAllocatorBufType = PoolAllocatorBufIntegralType[];
class PoolAllocator
{
public:
@ -54,7 +58,7 @@ class PoolAllocator
, lock(false)
{
}
PoolAllocator(allocators::CountingAllocator<PoolAllocatorBufType>* allocator, unsigned windowSize = DEFAULT_WINDOW_SIZE,
PoolAllocator(allocators::CountingAllocator<PoolAllocatorBufType> alloc, unsigned windowSize = DEFAULT_WINDOW_SIZE,
bool isTmpSpace = false, bool _useLock = false)
: allocSize(windowSize)
, tmpSpace(isTmpSpace)
@ -63,7 +67,7 @@ class PoolAllocator
, nextAlloc(0)
, useLock(_useLock)
, lock(false)
, allocator(allocator)
, alloc(alloc)
{
}
PoolAllocator(const PoolAllocator& p)
@ -74,7 +78,7 @@ class PoolAllocator
, nextAlloc(0)
, useLock(p.useLock)
, lock(false)
, allocator(p.allocator)
, alloc(p.alloc)
{
}
virtual ~PoolAllocator()
@ -106,23 +110,22 @@ class PoolAllocator
void* allocOOB(uint64_t size);
unsigned allocSize;
std::vector<std::shared_ptr<PoolAllocatorBufType[]>> mem;
std::vector<boost::shared_ptr<PoolAllocatorBufType>> mem;
bool tmpSpace;
unsigned capacityRemaining;
uint64_t memUsage;
PoolAllocatorBufType* nextAlloc;
PoolAllocatorBufIntegralType* nextAlloc;
bool useLock;
std::atomic<bool> lock;
struct OOBMemInfo
{
std::shared_ptr<PoolAllocatorBufType[]> mem;
boost::shared_ptr<PoolAllocatorBufType> mem;
uint64_t size;
};
typedef std::map<void*, OOBMemInfo> OutOfBandMap;
OutOfBandMap oob; // for mem chunks bigger than the window size; these can be dealloc'd
// WIP rename to allocator
allocators::CountingAllocator<PoolAllocatorBufType>* allocator = nullptr;
std::optional<allocators::CountingAllocator<PoolAllocatorBufType>> alloc {};
};
inline void* PoolAllocator::allocate(uint64_t size)

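A construction sketch for the reworked class (not part of the commit; buildPool and the request sizes are hypothetical, and the ColumnStore headers are assumed to be available). The allocator is now taken by value and held in a std::optional rather than through the old raw CountingAllocator pointer:

#include "poolallocator.h"
#include "resourcemanager.h"

void buildPool(joblist::ResourceManager* rm)
{
  auto counting = rm->getAllocator<utils::PoolAllocatorBufType>();
  utils::PoolAllocator pool(counting);  // DEFAULT_WINDOW_SIZE window, counted

  void* p = pool.allocate(256);         // small request: carved from the current window
  (void)p;
  // Requests larger than the window are handled by allocOOB(), which this
  // commit also routes through the counting allocator.
  pool.deallocateAll();                 // frees the windows and any out-of-band chunks
}
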
View File

@ -25,6 +25,7 @@
#include <memory>
#include <boost/shared_ptr.hpp>
#include "poolallocator.h"
#include "resourcemanager.h"
#undef min
#undef max
@ -61,6 +62,7 @@ class STLPoolAllocator
};
STLPoolAllocator() throw();
STLPoolAllocator(joblist::ResourceManager* rm);
STLPoolAllocator(const STLPoolAllocator&) throw();
STLPoolAllocator(uint32_t capacity) throw();
template <class U>
@ -94,6 +96,20 @@ STLPoolAllocator<T>::STLPoolAllocator() throw()
pa.reset(new PoolAllocator(DEFAULT_SIZE));
}
template <class T>
STLPoolAllocator<T>::STLPoolAllocator(joblist::ResourceManager* rm)
{
if (rm)
{
auto alloc = rm->getAllocator<PoolAllocatorBufType>();
pa.reset(new PoolAllocator(alloc));
}
else
{
pa.reset(new PoolAllocator(DEFAULT_SIZE));
}
}
template <class T>
STLPoolAllocator<T>::STLPoolAllocator(const STLPoolAllocator<T>& s) throw()
{

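The new ResourceManager-aware constructor is what lets standard containers charge their node allocations to the query's memory budget. A sketch of the intended use (not part of the commit; the map and key types are hypothetical, and the header name stlpoolallocator.h is assumed):

#include <stdint.h>
#include <functional>
#include <unordered_map>
#include "resourcemanager.h"
#include "stlpoolallocator.h"

void buildMap(joblist::ResourceManager* rm)
{
  using Alloc = utils::STLPoolAllocator<std::pair<const int64_t, int64_t>>;
  using Map = std::unordered_map<int64_t, int64_t, std::hash<int64_t>,
                                 std::equal_to<int64_t>, Alloc>;

  // rm may be nullptr: the new ctor then falls back to an unaccounted
  // PoolAllocator of DEFAULT_SIZE, exactly as the diff above shows.
  Alloc alloc(rm);
  Map values(16, std::hash<int64_t>(), std::equal_to<int64_t>(), alloc);

  values.emplace(1, 10);  // node memory comes from the pool allocator
}
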
View File

@ -24,6 +24,7 @@
#include "hasher.h"
#include "lbidlist.h"
#include "resourcemanager.h"
#include "spinlock.h"
#include "vlarray.h"
#include "threadnaming.h"
@ -36,10 +37,17 @@ using namespace joblist;
namespace joiner
{
// TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// uint32_t smallJoinColumn, uint32_t largeJoinColumn, JoinType jt,
// threadpool::ThreadPool* jsThreadPool)
// : TupleJoiner(smallInput, largeInput, smallJoinColumn, largeJoinColumn, jt, jsThreadPool, nullptr)
// {
// }
// Typed joiner ctor
TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
uint32_t smallJoinColumn, uint32_t largeJoinColumn, JoinType jt,
threadpool::ThreadPool* jsThreadPool, const uint64_t numCores)
threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores)
: smallRG(smallInput)
, largeRG(largeInput)
, joinAlg(INSERTING)
@ -64,7 +72,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const long double, Row::Pointer>> alloc;
STLPoolAllocator<pair<const long double, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
ld[i].reset(new ldhash_t(10, hasher(), ldhash_t::key_equal(), alloc));
}
@ -75,7 +83,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const int64_t, Row::Pointer>> alloc;
STLPoolAllocator<pair<const int64_t, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
sth[i].reset(new sthash_t(10, hasher(), sthash_t::key_equal(), alloc));
}
@ -86,7 +94,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const int64_t, uint8_t*>> alloc;
STLPoolAllocator<pair<const int64_t, uint8_t*>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
h[i].reset(new hash_t(10, hasher(), hash_t::key_equal(), alloc));
}
@ -143,10 +151,17 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
nullValueForJoinColumn = smallNullRow.getSignedNullValue(smallJoinColumn);
}
// TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// const vector<uint32_t>& smallJoinColumns, const vector<uint32_t>& largeJoinColumns,
// JoinType jt, threadpool::ThreadPool* jsThreadPool)
// : TupleJoiner(smallInput, largeInput, smallJoinColumns, largeJoinColumns, jt, jsThreadPool, nullptr)
// {
// }
// Typeless joiner ctor
TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
const vector<uint32_t>& smallJoinColumns, const vector<uint32_t>& largeJoinColumns,
JoinType jt, threadpool::ThreadPool* jsThreadPool, const uint64_t numCores)
JoinType jt, threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores)
: smallRG(smallInput)
, largeRG(largeInput)
, joinAlg(INSERTING)
@ -170,7 +185,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
ht.reset(new boost::scoped_ptr<typelesshash_t>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const TypelessData, Row::Pointer>> alloc;
STLPoolAllocator<pair<const TypelessData, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
ht[i].reset(new typelesshash_t(10, hasher(), typelesshash_t::key_equal(), alloc));
}
@ -226,7 +241,10 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
// TODO: make it explicit to avoid future confusion.
storedKeyAlloc.reset(new FixedAllocator[numCores]);
for (i = 0; i < (uint)numCores; i++)
storedKeyAlloc[i].setAllocSize(keyLength);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
storedKeyAlloc[i] = FixedAllocator(alloc, keyLength);
}
}
TupleJoiner::TupleJoiner()
@ -856,7 +874,11 @@ void TupleJoiner::setInUM()
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);
for (i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);
}
}
}
@ -911,7 +933,10 @@ void TupleJoiner::setInUM(vector<RGData>& rgs)
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);
for (i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);
}
}
}
@ -967,7 +992,10 @@ void TupleJoiner::setThreadCount(uint32_t cnt)
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);
for (uint32_t i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);
}
}
if (fe)
@ -1839,6 +1867,7 @@ std::shared_ptr<TupleJoiner> TupleJoiner::copyForDiskJoin()
ret->discreteValues.reset(new bool[smallKeyColumns.size()]);
ret->cpValues.reset(new vector<int128_t>[smallKeyColumns.size()]);
ret->resourceManager_ = resourceManager_;
for (uint32_t i = 0; i < smallKeyColumns.size(); i++)
{
@ -1877,7 +1906,10 @@ std::shared_ptr<TupleJoiner> TupleJoiner::copyForDiskJoin()
{
ret->storedKeyAlloc.reset(new FixedAllocator[numCores]);
for (int i = 0; i < numCores; i++)
ret->storedKeyAlloc[i].setAllocSize(keyLength);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
ret->storedKeyAlloc[i] = FixedAllocator(alloc, keyLength);
}
}
ret->numCores = numCores;

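The three-line pattern of fetching a CountingAllocator and rebuilding a FixedAllocator now repeats in the typed ctor, setInUM(), setInUM(vector<RGData>&), setThreadCount() and copyForDiskJoin(). A hypothetical free-function helper (not in the commit) captures it once:

#include <stdint.h>
#include "fixedallocator.h"
#include "resourcemanager.h"

// One CountingAllocator per FixedAllocator; every key block handed out is
// charged against the ResourceManager's memory budget.
static utils::FixedAllocator makeKeyAllocator(joblist::ResourceManager* rm,
                                              uint32_t keyLength, bool tmpSpace)
{
  auto alloc = rm->getAllocator<utils::FixedAllocatorBufType>();
  return utils::FixedAllocator(alloc, keyLength, tmpSpace);
}

The loops above would then reduce to tmpKeyAlloc[i] = makeKeyAllocator(resourceManager_, keyLength, true); and ret->storedKeyAlloc[i] = makeKeyAllocator(resourceManager_, keyLength, false);.
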
View File

@ -26,6 +26,7 @@
#include <boost/scoped_array.hpp>
#include <unordered_map>
#include "resourcemanager.h"
#include "rowgroup.h"
#include "joiner.h"
#include "fixedallocator.h"
@ -266,14 +267,22 @@ class TupleJoiner
};
/* ctor to use for numeric join */
// TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// uint32_t smallJoinColumn, uint32_t largeJoinColumn, joblist::JoinType jt,
// threadpool::ThreadPool* jsThreadPool);
TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
uint32_t smallJoinColumn, uint32_t largeJoinColumn, joblist::JoinType jt,
threadpool::ThreadPool* jsThreadPool, const uint64_t numCores);
threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores);
/* ctor to use for string & compound join */
TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// const std::vector<uint32_t>& smallJoinColumns, const std::vector<uint32_t>& largeJoinColumns,
// joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool);
TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
const std::vector<uint32_t>& smallJoinColumns, const std::vector<uint32_t>& largeJoinColumns,
joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool, const uint64_t numCores);
joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores);
~TupleJoiner();
@ -562,6 +571,8 @@ class TupleJoiner
void bucketsToTables(buckets_t*, hash_table_t*);
bool _convertToDiskJoin;
joblist::ResourceManager* resourceManager_ = nullptr;
};
} // namespace joiner
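
A call-site sketch for the widened typed-join constructor (the wrapper and its argument values are hypothetical; only the constructor signature comes from the diff above). Callers now pass the ResourceManager through so every hash table and key allocator inside the joiner is charged to its budget:

#include <memory>
#include <stdint.h>
#include "resourcemanager.h"
#include "tuplejoiner.h"

std::shared_ptr<joiner::TupleJoiner> makeTypedJoiner(const rowgroup::RowGroup& smallRG,
                                                     const rowgroup::RowGroup& largeRG,
                                                     joblist::JoinType jt,
                                                     threadpool::ThreadPool* threadPool,
                                                     joblist::ResourceManager* rm)
{
  const uint32_t smallJoinColumn = 0;  // hypothetical key positions
  const uint32_t largeJoinColumn = 0;
  const uint64_t numCores = 4;         // hypothetical core count
  return std::make_shared<joiner::TupleJoiner>(smallRG, largeRG, smallJoinColumn,
                                               largeJoinColumn, jt, threadPool, rm, numCores);
}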