mirror of https://github.com/mariadb-corporation/mariadb-columnstore-engine.git synced 2025-08-01 06:46:55 +03:00

clang format apply

Author: Leonid Fedorov
Date:   2022-01-21 16:43:49 +00:00
Parent: 6b6411229f
Commit: 04752ec546

1376 changed files with 393460 additions and 412662 deletions


@@ -16,9 +16,9 @@
   MA 02110-1301, USA. */

/******************************************************************************************
 * $Id$
 *
 ******************************************************************************************/

/* This allocator is an attempt to consolidate small allocations and
   deallocations to boost performance and reduce mem fragmentation. */
@@ -34,100 +34,105 @@
namespace utils
{
class PoolAllocator
{
 public:
  static const unsigned DEFAULT_WINDOW_SIZE = 4096 * 40;  // should be an integral # of pages

  explicit PoolAllocator(unsigned windowSize = DEFAULT_WINDOW_SIZE, bool isTmpSpace = false,
                         bool _useLock = false)
   : allocSize(windowSize)
   , tmpSpace(isTmpSpace)
   , capacityRemaining(0)
   , memUsage(0)
   , nextAlloc(0)
   , useLock(_useLock)
   , lock(false)
  {
  }
  PoolAllocator(const PoolAllocator& p)
   : allocSize(p.allocSize)
   , tmpSpace(p.tmpSpace)
   , capacityRemaining(0)
   , memUsage(0)
   , nextAlloc(0)
   , useLock(p.useLock)
   , lock(false)
  {
  }
  virtual ~PoolAllocator()
  {
  }

  PoolAllocator& operator=(const PoolAllocator&);

  void* allocate(uint64_t size);
  void deallocate(void* p);
  void deallocateAll();

  inline uint64_t getMemUsage() const
  {
    return memUsage;
  }
  unsigned getWindowSize() const
  {
    return allocSize;
  }

  void setUseLock(bool ul)
  {
    useLock = ul;
  }

 private:
  void newBlock();
  void* allocOOB(uint64_t size);

  unsigned allocSize;
  std::vector<boost::shared_array<uint8_t> > mem;
  bool tmpSpace;
  unsigned capacityRemaining;
  uint64_t memUsage;
  uint8_t* nextAlloc;
  bool useLock;
  std::atomic<bool> lock;

  struct OOBMemInfo
  {
    boost::shared_array<uint8_t> mem;
    uint64_t size;
  };
  typedef std::map<void*, OOBMemInfo> OutOfBandMap;
  OutOfBandMap oob;  // for mem chunks bigger than the window size; these can be dealloc'd
};

inline void* PoolAllocator::allocate(uint64_t size)
{
  void* ret;
  bool _false = false;

  if (useLock)
    while (!lock.compare_exchange_weak(_false, true, std::memory_order_acquire))
      _false = false;

  if (size > allocSize)
  {
    ret = allocOOB(size);

    if (useLock)
      lock.store(false, std::memory_order_release);

    return ret;
  }

  if (size > capacityRemaining)
    newBlock();

  ret = (void*)nextAlloc;
  nextAlloc += size;
  capacityRemaining -= size;
  memUsage += size;

  if (useLock)
    lock.store(false, std::memory_order_release);

  return ret;
}
}  // namespace utils
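
The diff above only touches formatting, but it is also a compact picture of the PoolAllocator interface: small requests are bump-allocated out of a fixed-size window, oversized requests go through allocOOB() and are tracked in the out-of-band map, and an optional atomic spin lock (acquire via compare_exchange_weak, release via store) guards the bookkeeping. A minimal usage sketch follows; the include path and the Node payload are assumptions for illustration, not part of the commit.

// Minimal usage sketch, assuming this header is utils/common/poolallocator.h
// in the ColumnStore tree; the include path and the Node struct below are
// illustrative assumptions only.
#include <cstdint>
#include <iostream>

#include "poolallocator.h"

struct Node
{
  uint64_t key;
  Node* next;
};

int main()
{
  utils::PoolAllocator pool;  // default window: DEFAULT_WINDOW_SIZE = 4096 * 40 bytes
  pool.setUseLock(true);      // enable the atomic spin lock if the pool is shared

  // Small requests are bump-allocated out of the current window.
  Node* head = nullptr;
  for (uint64_t i = 0; i < 1000; ++i)
  {
    Node* n = static_cast<Node*>(pool.allocate(sizeof(Node)));
    n->key = i;
    n->next = head;
    head = n;
  }

  // A request larger than the window is handled out-of-band and tracked in the
  // OOB map, so it can be released individually.
  void* big = pool.allocate(pool.getWindowSize() + 1);
  pool.deallocate(big);

  std::cout << "memUsage: " << pool.getMemUsage() << " bytes" << std::endl;

  // Window-backed memory is released in bulk, not per allocation.
  pool.deallocateAll();
  return 0;
}

Locking is opt-in (useLock defaults to false in the constructor), so single-threaded callers do not pay for the atomic on the allocation fast path.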