
Squash merge of the threaded UM hash table construction feature.

Conflicts:
	oam/etc/Columnstore.xml.singleserver
Author: Patrick LeBlanc
Date:   2019-11-21 14:41:00 -05:00
commit  0d26dc447c
parent  8c2cef3727

11 changed files with 837 additions and 375 deletions

utils/common/fixedallocator.cpp

@@ -50,6 +50,9 @@ FixedAllocator::FixedAllocator(const FixedAllocator& f)
     tmpSpace = f.tmpSpace;
     capacityRemaining = 0;
     currentlyStored = 0;
+    useLock = f.useLock;
+    lock = false;
 }
 
 FixedAllocator& FixedAllocator::operator=(const FixedAllocator& f)
@@ -57,10 +60,22 @@ FixedAllocator& FixedAllocator::operator=(const FixedAllocator& f)
     elementCount = f.elementCount;
     elementSize = f.elementSize;
     tmpSpace = f.tmpSpace;
+    useLock = f.useLock;
+    lock = false;
     deallocateAll();
     return *this;
 }
 
+void FixedAllocator::setUseLock(bool useIt)
+{
+    useLock = useIt;
+}
+
+void FixedAllocator::setAllocSize(uint allocSize)
+{
+    elementSize = allocSize;
+}
+
 void FixedAllocator::newBlock()
 {
     shared_array<uint8_t> next;
@@ -80,39 +95,15 @@ void FixedAllocator::newBlock()
     }
 }
 
-void* FixedAllocator::allocate()
-{
-    void* ret;
-
-    if (capacityRemaining < elementSize)
-        newBlock();
-
-    ret = nextAlloc;
-    nextAlloc += elementSize;
-    capacityRemaining -= elementSize;
-    currentlyStored += elementSize;
-    return ret;
-}
-
-void* FixedAllocator::allocate(uint32_t len)
-{
-    void* ret;
-
-    if (capacityRemaining < len)
-        newBlock();
-
-    ret = nextAlloc;
-    nextAlloc += len;
-    capacityRemaining -= len;
-    currentlyStored += len;
-    return ret;
-}
-
 void FixedAllocator::truncateBy(uint32_t amt)
 {
+    if (useLock)
+        getSpinlock(lock);
     nextAlloc -= amt;
     capacityRemaining += amt;
     currentlyStored -= amt;
+    if (useLock)
+        releaseSpinlock(lock);
 }
 
 void FixedAllocator::deallocateAll()

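For context, a minimal usage sketch of the new locked mode this hunk introduces. The harness below is illustrative and not part of the commit; it assumes FixedAllocator lives in namespace utils alongside spinlock.h and that the header path mirrors utils/common/.

    #include <thread>
    #include <vector>
    #include "fixedallocator.h"  // path assumed, matching utils/common/spinlock.h

    int main()
    {
        // One allocator shared by several build threads, as in the
        // threaded UM hash table construction this commit merges.
        utils::FixedAllocator fa(64);  // 64-byte fixed-size elements
        fa.setUseLock(true);           // serialize allocate()/truncateBy() via the spinlock

        std::vector<std::thread> workers;
        for (int t = 0; t < 4; t++)
            workers.emplace_back([&fa]() {
                for (int i = 0; i < 100000; i++)
                    (void) fa.allocate();  // safe to call concurrently once useLock is set
            });

        for (auto& w : workers)
            w.join();

        return 0;  // pool memory is released when fa is destroyed
    }

Without setUseLock(true) the allocator keeps its original unsynchronized fast path; the flag defaults to false in every constructor, so existing single-threaded callers are unaffected.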
utils/common/fixedallocator.h

@@ -38,6 +38,8 @@
 #include <vector>
 #include <limits>
 #include <unistd.h>
+#include <atomic>
+#include "spinlock.h"
 
 #if defined(_MSC_VER) && defined(xxxFIXEDALLOCATOR_DLLEXPORT)
 #define EXPORT __declspec(dllexport)
@@ -55,11 +57,13 @@ public:
     EXPORT FixedAllocator() :
         capacityRemaining(0),
-        elementCount(std::numeric_limits<unsigned long>::max()),
+        elementCount(DEFAULT_NUM_ELEMENTS),
         elementSize(0),
         currentlyStored(0),
         tmpSpace(false),
-        nextAlloc(0) {}
+        nextAlloc(0),
+        useLock(false),
+        lock(false) {}
 
     EXPORT explicit FixedAllocator(unsigned long allocSize, bool isTmpSpace = false,
                                    unsigned long numElements = DEFAULT_NUM_ELEMENTS) :
         capacityRemaining(0),
@@ -67,7 +71,9 @@ public:
         elementSize(allocSize),
         currentlyStored(0),
         tmpSpace(isTmpSpace),
-        nextAlloc(0) {}
+        nextAlloc(0),
+        useLock(false),
+        lock(false) {}
 
     EXPORT FixedAllocator(const FixedAllocator&);
     EXPORT FixedAllocator& operator=(const FixedAllocator&);
     virtual ~FixedAllocator() {}
@@ -78,6 +84,8 @@ public:
     void deallocate() { } // does nothing
     EXPORT void deallocateAll(); // drops all memory in use
     EXPORT uint64_t getMemUsage() const;
+    void setUseLock(bool);
+    void setAllocSize(uint);
 
 private:
     void newBlock();
@@ -89,10 +97,46 @@ private:
     uint64_t currentlyStored;
     bool tmpSpace;
     uint8_t* nextAlloc;
+    bool useLock;
+    std::atomic<bool> lock;
 };
 
+inline void* FixedAllocator::allocate()
+{
+    void* ret;
+
+    if (useLock)
+        getSpinlock(lock);
+    if (capacityRemaining < elementSize)
+        newBlock();
+    ret = nextAlloc;
+    nextAlloc += elementSize;
+    capacityRemaining -= elementSize;
+    currentlyStored += elementSize;
+    if (useLock)
+        releaseSpinlock(lock);
+    return ret;
+}
+
+inline void* FixedAllocator::allocate(uint32_t len)
+{
+    void* ret;
+
+    if (useLock)
+        getSpinlock(lock);
+    if (capacityRemaining < len)
+        newBlock();
+    ret = nextAlloc;
+    nextAlloc += len;
+    capacityRemaining -= len;
+    currentlyStored += len;
+    if (useLock)
+        releaseSpinlock(lock);
+    return ret;
+}
+
 #undef EXPORT
 } // namespace
 
 #endif
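The inline allocate(uint32_t) and truncateBy() pair suggests a reserve-then-shrink pattern for variable-length data. A hypothetical single-threaded sketch of that pattern (the function name, sizes, and header path are invented for illustration, not taken from this commit):

    #include <cstring>
    #include "fixedallocator.h"  // path assumed

    // Copy a string of unknown final size: reserve the worst case, then
    // give back the unused tail. truncateBy() rewinds the bump pointer,
    // so this only works if no other allocation happened in between;
    // hence single-threaded use here. Caller guarantees strlen(src) < maxLen.
    char* storeKey(utils::FixedAllocator& fa, const char* src, uint32_t maxLen)
    {
        char* dst = reinterpret_cast<char*>(fa.allocate(maxLen));
        uint32_t actual = static_cast<uint32_t>(std::strlen(src)) + 1;
        std::memcpy(dst, src, actual);
        fa.truncateBy(maxLen - actual);  // return the unused bytes to the pool
        return dst;
    }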

utils/common/spinlock.h (new file, 27 lines)

@@ -0,0 +1,27 @@
+#pragma once
+
+#include <atomic>
+
+namespace utils
+{
+
+inline void getSpinlock(std::atomic<bool> &lock)
+{
+    bool _false = false;
+    while (!lock.compare_exchange_weak(_false, true, std::memory_order_acquire))
+        _false = false;
+}
+
+inline bool trySpinlock(std::atomic<bool> &lock)
+{
+    bool _false = false;
+    bool ret = lock.compare_exchange_weak(_false, true, std::memory_order_acquire);
+    return ret;
+}
+
+inline void releaseSpinlock(std::atomic<bool> &lock)
+{
+    lock.store(false, std::memory_order_release);
+}
+
+}
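As a sanity check on the acquire/release pairing, a minimal self-contained example (not part of the commit): four threads increment a plain counter under the lock, and the release store in releaseSpinlock() publishes each increment to the next acquirer. Note that compare_exchange_weak is allowed to fail spuriously, so trySpinlock() can return false even when the lock happens to be free; callers should treat it as best-effort.

    #include <atomic>
    #include <cassert>
    #include <thread>
    #include <vector>
    #include "spinlock.h"

    int main()
    {
        std::atomic<bool> lock(false);
        long counter = 0;  // deliberately non-atomic; the spinlock orders access

        std::vector<std::thread> threads;
        for (int t = 0; t < 4; t++)
            threads.emplace_back([&]() {
                for (int i = 0; i < 100000; i++)
                {
                    utils::getSpinlock(lock);
                    counter++;
                    utils::releaseSpinlock(lock);
                }
            });

        for (auto& th : threads)
            th.join();

        assert(counter == 400000);  // every increment was observed
        return 0;
    }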