feat(): propagated changes into STLPoolAllocator and friends
@@ -54,7 +54,7 @@ using namespace joiner;
namespace joblist
{
BatchPrimitiveProcessorJL::BatchPrimitiveProcessorJL(const ResourceManager* rm)
BatchPrimitiveProcessorJL::BatchPrimitiveProcessorJL(ResourceManager* rm)
: ot(BPS_ELEMENT_TYPE)
, needToSetLBID(true)
, count(1)
@@ -80,6 +80,7 @@ BatchPrimitiveProcessorJL::BatchPrimitiveProcessorJL(const ResourceManager* rm)
, fJoinerChunkSize(rm->getJlJoinerChunkSize())
, hasSmallOuterJoin(false)
, _priority(1)
, rm_(rm)
{
PMJoinerCount = 0;
uuid = bu::nil_generator()();
@@ -1481,7 +1482,8 @@ bool BatchPrimitiveProcessorJL::nextTupleJoinerMsg(ByteStream& bs)

if (tJoiners[joinerNum]->isTypelessJoin())
{
utils::FixedAllocator fa(tlKeyLens[joinerNum], true);
auto alloc = rm_->getAllocator<utils::FixedAllocatorBufType>();
utils::FixedAllocator fa(alloc, tlKeyLens[joinerNum], true);

for (i = pos; i < pos + toSend; i++)
{
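Illustrative sketch (not part of this commit): the allocator returned by ResourceManager::getAllocator<>() is assumed to be a CountingAllocator wired to an atomic memory budget, as in tests/poolallocator.cpp further down; constructing the new allocator-aware FixedAllocator by hand would look roughly like this.

#include <atomic>
#include "countingallocator.h"
#include "fixedallocator.h"

void sketchFixedAllocator(uint32_t keyLen)
{
  constexpr int64_t kBudget = 10 * 1024 * 1024;                  // hypothetical 10 MiB cap
  static std::atomic<int64_t> budget{kBudget};
  // two-argument ctor (&counter, threshold) mirrors the one used in tests/poolallocator.cpp
  allocators::CountingAllocator<utils::FixedAllocatorBufType> alloc(&budget, kBudget / 100);
  // new constructor from this commit: allocator first, then the fixed element size
  utils::FixedAllocator fa(alloc, keyLen, true);
  void* p = fa.allocate();   // every block the pool grabs is charged against 'budget'
  (void)p;
}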
@@ -59,7 +59,7 @@ class BatchPrimitiveProcessorJL
{
public:
/* Constructor used by the JobStep */
explicit BatchPrimitiveProcessorJL(const ResourceManager* rm);
explicit BatchPrimitiveProcessorJL(ResourceManager* rm);
~BatchPrimitiveProcessorJL();

/* Interface used by the JobStep */
@@ -384,6 +384,8 @@ class BatchPrimitiveProcessorJL

boost::uuids::uuid uuid;

joblist::ResourceManager* rm_ = nullptr;

friend class CommandJL;
friend class ColumnCommandJL;
friend class PassThruCommandJL;
@@ -277,22 +277,21 @@ void TupleHashJoinStep::startSmallRunners(uint index)

if (typelessJoin[index])
{
joiner.reset(new TupleJoiner(smallRGs[index], largeRG, smallSideKeys[index], largeSideKeys[index], jt,
&jobstepThreadPool, numCores));
joiners[index].reset(new TupleJoiner(smallRGs[index], largeRG, smallSideKeys[index], largeSideKeys[index],
jt, &jobstepThreadPool, resourceManager, numCores));
}
else
{
joiner.reset(new TupleJoiner(smallRGs[index], largeRG, smallSideKeys[index][0], largeSideKeys[index][0],
jt, &jobstepThreadPool, numCores));
joiners[index].reset(new TupleJoiner(smallRGs[index], largeRG, smallSideKeys[index][0], largeSideKeys[index][0],
jt, &jobstepThreadPool, resourceManager, numCores));
}

joiner->setUniqueLimit(uniqueLimit);
joiner->setTableName(smallTableNames[index]);
joiners[index] = joiner;
joiners[index]->setUniqueLimit(uniqueLimit);
joiners[index]->setTableName(smallTableNames[index]);

/* check for join types unsupported on the PM. */
if (!largeBPS || !isExeMgr)
joiner->setInUM(rgData[index]);
joiners[index]->setInUM(rgData[index]);

/*
start the small runners
@@ -306,7 +305,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)
uint64_t memMonitor = jobstepThreadPool.invoke([this, index] { this->trackMem(index); });
// starting 1 thread when in PM mode, since it's only inserting into a
// vector of rows. The rest will be started when converted to UM mode.
if (joiner->inUM())
if (joiners[index]->inUM())
{
for (int i = 0; i < numCores; i++)
{
@@ -320,7 +319,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)

// wait for the first thread to join, then decide whether the others exist and need joining
jobstepThreadPool.join(jobs[0]);
if (joiner->inUM())
if (joiners[index]->inUM())
{
for (int i = 1; i < numCores; i++)
{
@@ -352,7 +351,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)
end_time = boost::posix_time::microsec_clock::universal_time();
if (!(fSessionId & 0x80000000))
cout << "hash table construction time = " << end_time - start_time <<
" size = " << joiner->size() << endl;
" size = " << joiners[index]->size() << endl;
*/

if (traceOn())
@@ -361,13 +360,13 @@ void TupleHashJoinStep::startSmallRunners(uint index)
}

ostringstream oss;
if (!joiner->onDisk())
if (!joiners[index]->onDisk())
{
// add extended info, and if not aborted then tell joiner
// we're done reading the small side.
if (traceOn())
{
if (joiner->inPM())
if (joiners[index]->inPM())
{
{
oss << "PM join (" << index << ")" << endl;
@@ -377,7 +376,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)
extendedInfo += oss.str();
}
}
else if (joiner->inUM())
else if (joiners[index]->inUM())
{
oss << "UM join (" << index << ")" << endl;
#ifdef JLF_DEBUG
@@ -387,7 +386,7 @@ void TupleHashJoinStep::startSmallRunners(uint index)
}
}
if (!cancelled())
joiner->doneInserting();
joiners[index]->doneInserting();
}

if (traceOn())
@@ -5,54 +5,54 @@ create table t1 (a int, b int) engine=columnstore;
create table t2 (a int, b int) engine=columnstore;
insert into t1 values (1, 2), (1, 3), (1, 4), (2, 5), (2, 6), (2, 7);
insert into t2 values (1, 2), (1, 2), (1, 4), (2, 5), (2, 6), (2, 8);
select * from t1, t2 where t1.a = t2.a and t2.b = (select max(b) from t2 where t1.a = t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b = (select max(b) from t2 where t1.a = t2.a);
a b a b
1 4 1 4
1 2 1 4
1 3 1 4
2 7 2 8
1 4 1 4
2 5 2 8
2 6 2 8
select * from t1, t2 where t1.a = t2.a and t2.b < (select max(b) from t2 where t1.a = t2.a) order by t2.b;
2 7 2 8
select * from t1, t2 where t1.a = t2.a and t2.b < (select max(b) from t2 where t1.a = t2.a);
a b a b
1 3 1 2
1 4 1 2
1 2 1 2
1 4 1 2
1 2 1 2
1 3 1 2
2 6 2 5
1 3 1 2
1 4 1 2
1 4 1 2
2 5 2 5
2 7 2 5
2 5 2 6
2 6 2 6
2 7 2 6
select * from t1, t2 where t1.a = t2.a and t2.b > (select max(b) from t2 where t1.a = t2.a) order by t2.b;
a b a b
select * from t1, t2 where t1.a = t2.a and t1.b = (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
a b a b
select * from t1, t2 where t1.a = t2.a and t2.b < (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
a b a b
1 3 1 2
1 4 1 2
1 2 1 2
1 4 1 2
1 2 1 2
1 3 1 2
2 6 2 5
2 5 2 5
2 7 2 5
2 5 2 6
2 6 2 6
2 7 2 5
2 7 2 6
select * from t1, t2 where t1.a = t2.a and t2.b > (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b > (select max(b) from t2 where t1.a = t2.a);
a b a b
select * from t1, t2 where t1.a = t2.a and t1.b = (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);
a b a b
select * from t1, t2 where t1.a = t2.a and t2.b < (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);
a b a b
1 2 1 2
1 2 1 2
1 3 1 2
1 3 1 2
1 4 1 2
1 4 1 2
2 5 2 5
2 5 2 6
2 6 2 5
2 6 2 6
2 7 2 5
2 7 2 6
select * from t1, t2 where t1.a = t2.a and t2.b > (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);
a b a b
1 4 1 4
1 2 1 4
1 3 1 4
2 7 2 8
1 4 1 4
2 5 2 8
2 6 2 8
2 7 2 8
drop table t1;
drop table t2;
DROP DATABASE mcol5195;
@@ -112,7 +112,7 @@ a
1
2
3
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2) order by 1,2,3;
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2);
a b c
1 1 100
1 1 101
@@ -124,7 +124,7 @@ select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2) and c
a b c
1 1 100
1 1 101
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);
a b c
1 1 100
1 1 101
@@ -193,7 +193,7 @@ select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2);
a b c
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2) and cs1.a=1;
a b c
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);
a b c
2 2 200
3 3 300
@@ -249,7 +249,7 @@ select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2 wh
a b c
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2 where b is not null) and cs1.a=1;
a b c
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from (select b from cs2 where b is not null) t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from (select b from cs2 where b is not null) t1 join cs2 t2 on t1.b=t2.b and t1.b=1);
a b c
2 2 200
3 3 300
@@ -348,21 +348,21 @@ select * from cs1 where (a,d) in (select t1.b,t1.c from cs2 t1 join cs2 t2 on t1
a d
1 100
3 302
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) order by 1,2,3,4;
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2);
a d b c
1 100 1 100
1 100 1 101
3 302 3 300
3 302 3 301
3 302 3 302
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) and cs1.a=1 order by 1,2,3,4;
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) and cs1.a=1;
a d b c
1 100 1 100
1 100 1 101
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select t1.b,t1.c from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);
a d b c
1 100 1 100
1 100 1 101
1 100 1 100
drop table cs1;
create table cs1 (a int);
insert into cs1 values (1), (2), (3), (4), (null);
@@ -15,13 +15,19 @@ create table t2 (a int, b int) engine=columnstore;
insert into t1 values (1, 2), (1, 3), (1, 4), (2, 5), (2, 6), (2, 7);
insert into t2 values (1, 2), (1, 2), (1, 4), (2, 5), (2, 6), (2, 8);

select * from t1, t2 where t1.a = t2.a and t2.b = (select max(b) from t2 where t1.a = t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b < (select max(b) from t2 where t1.a = t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b > (select max(b) from t2 where t1.a = t2.a) order by t2.b;
--sorted_result
select * from t1, t2 where t1.a = t2.a and t2.b = (select max(b) from t2 where t1.a = t2.a);
--sorted_result
select * from t1, t2 where t1.a = t2.a and t2.b < (select max(b) from t2 where t1.a = t2.a);
--sorted_result
select * from t1, t2 where t1.a = t2.a and t2.b > (select max(b) from t2 where t1.a = t2.a);

select * from t1, t2 where t1.a = t2.a and t1.b = (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b < (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
select * from t1, t2 where t1.a = t2.a and t2.b > (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a) order by t2.b;
--sorted_result
select * from t1, t2 where t1.a = t2.a and t1.b = (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);
--sorted_result
select * from t1, t2 where t1.a = t2.a and t2.b < (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);
--sorted_result
select * from t1, t2 where t1.a = t2.a and t2.b > (select avg(t2.b) from t2 where t1.a = t2.a group by t2.a);

drop table t1;
drop table t2;
@@ -69,11 +69,13 @@ select * from cs1 where a in (select t1.b from cs2 t1, cs2 t2 where t1.b=t2.b an
select * from cs1 where a in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.c=t2.c);

### Outer query containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2) order by 1,2,3;
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2);
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select b from cs2) and cs1.a=1;

### Both IN subquery and outer queries containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);

## NOT IN subquery
### Basic tests
@@ -120,7 +122,8 @@ select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2);
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2) and cs1.a=1;

### Both IN subquery and outer queries containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);

## NOT IN subquery without NULLs
### Basic tests
@@ -158,7 +161,8 @@ select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2 wh
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select b from cs2 where b is not null) and cs1.a=1;

### Both IN subquery and outer queries containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from (select b from cs2 where b is not null) t1 join cs2 t2 on t1.b=t2.b and t1.b=1) order by 1,2,3;
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and cs1.a not in (select t1.b from (select b from cs2 where b is not null) t1 join cs2 t2 on t1.b=t2.b and t1.b=1);

# Special cases involving NULLs
select * from cs1 where a in (select b from cs2 where b is null);
@@ -213,8 +217,10 @@ select * from cs1 where (a,d) in (select t1.b,t1.c from cs2 t1, cs2 t2 where t1.
select * from cs1 where (a,d) in (select t1.b,t1.c from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.c=t2.c);

### Outer query containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) order by 1,2,3,4;
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) and cs1.a=1 order by 1,2,3,4;
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2);
--sorted_result
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select b,c from cs2) and cs1.a=1;

### Both IN subquery and outer queries containing joins
select * from cs1 join cs2 on cs1.a=cs2.b and (cs1.a,cs1.d) in (select t1.b,t1.c from cs2 t1 join cs2 t2 on t1.b=t2.b and t1.b=1);
@@ -331,8 +331,8 @@ void BatchPrimitiveProcessor::initBPP(ByteStream& bs)
{
// storedKeyAllocators[j].setUseLock(true);
// WIP use one copy of the allocator
auto allocator = exemgr::globServiceExeMgr->getRm().getAllocator<utils::PoolAllocatorBufType>();
storedKeyAllocators.emplace_back(PoolAllocator(&allocator, PoolAllocator::DEFAULT_WINDOW_SIZE, false, true));
auto alloc = exemgr::globServiceExeMgr->getRm().getAllocator<utils::PoolAllocatorBufType>();
storedKeyAllocators.emplace_back(PoolAllocator(alloc, PoolAllocator::DEFAULT_WINDOW_SIZE, false, true));

joinNullValues.reset(new uint64_t[joinerCount]);
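A minimal sketch (not part of this commit) of the new by-value construction: the old overload took a CountingAllocator* and kept the raw pointer to a local, while the new one copies the counting allocator into the pool, so the pool can safely outlive the local it was created from. The (&counter, threshold) constructor arguments are borrowed from tests/poolallocator.cpp and are an assumption about what ResourceManager::getAllocator<>() produces.

#include <atomic>
#include "countingallocator.h"
#include "poolallocator.h"

utils::PoolAllocator makeAccountedPool()
{
  constexpr int64_t kBudget = 10 * 1024 * 1024;                 // hypothetical budget
  static std::atomic<int64_t> counter{kBudget};
  allocators::CountingAllocator<utils::PoolAllocatorBufType> alloc(&counter, kBudget / 100);
  // by-value allocator, default window, no tmp space, locking enabled
  return utils::PoolAllocator(alloc, utils::PoolAllocator::DEFAULT_WINDOW_SIZE, false, true);
}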
@@ -81,6 +81,11 @@ if (WITH_UNITTESTS)
target_link_libraries(counting_allocator ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} ${GTEST_LIBRARIES})
gtest_add_tests(TARGET counting_allocator TEST_PREFIX columnstore:)

add_executable(poolallocator poolallocator.cpp)
add_dependencies(poolallocator googletest)
target_link_libraries(poolallocator ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} ${GTEST_LIBRARIES})
gtest_add_tests(TARGET poolallocator TEST_PREFIX columnstore:)

add_executable(comparators_tests comparators-tests.cpp)
target_link_libraries(comparators_tests ${ENGINE_LDFLAGS} ${ENGINE_WRITE_LIBS} ${CPPUNIT_LIBRARIES} cppunit)
add_test(NAME columnstore:comparators_tests COMMAND comparators_tests)
tests/poolallocator.cpp (new file, 287 lines)
@@ -0,0 +1,287 @@
/* Copyright (C) 2024 MariaDB Corporation

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include <gtest/gtest.h>
#include <sys/types.h>
#include <atomic>
#include <cstddef>
#include <memory>
#include <thread>

#include "countingallocator.h"
#include "poolallocator.h"

using namespace allocators;
using namespace utils;

/**
* Check that an explicitly specified windowSize is honored at construction.
*/
TEST(PoolAllocatorTest, CustomWindowSize)
{
const unsigned CUSTOM_SIZE = 1024;
PoolAllocator pa(CUSTOM_SIZE);
EXPECT_EQ(pa.getWindowSize(), CUSTOM_SIZE);
EXPECT_EQ(pa.getMemUsage(), 0ULL);
}
/**
* Basic allocation of a small memory block:
* - Allocate a block smaller than windowSize.
* - Check that memUsage grew by the size of the allocated block.
* - The returned pointer must not be nullptr.
*/
TEST(PoolAllocatorTest, AllocateSmallBlock)
{
PoolAllocator pa;
uint64_t initialUsage = pa.getMemUsage();
const uint64_t ALLOC_SIZE = 128;

void* ptr = pa.allocate(ALLOC_SIZE);

ASSERT_NE(ptr, nullptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage + ALLOC_SIZE);
}

/**
* Allocation of a block larger than windowSize (Out-Of-Band, OOB):
* - Check that memUsage grew by the requested number of bytes.
* - The pointer is not nullptr.
*/
TEST(PoolAllocatorTest, AllocateOOBBlock)
{
// Pick a size guaranteed to be larger than the default window
const uint64_t BIG_BLOCK_SIZE = PoolAllocator::DEFAULT_WINDOW_SIZE + 1024;
PoolAllocator pa;
uint64_t initialUsage = pa.getMemUsage();

void* ptr = pa.allocate(BIG_BLOCK_SIZE);
ASSERT_NE(ptr, nullptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage + BIG_BLOCK_SIZE);
}

/**
* Deallocation of an Out-Of-Band block:
* - Make sure that after deallocate, memUsage returns to its initial value.
*/
TEST(PoolAllocatorTest, DeallocateOOBBlock)
{
PoolAllocator pa;
// A block larger than windowSize
const uint64_t BIG_BLOCK_SIZE = pa.getWindowSize() + 1024;

uint64_t initialUsage = pa.getMemUsage();
void* ptr = pa.allocate(BIG_BLOCK_SIZE);
ASSERT_NE(ptr, nullptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage + BIG_BLOCK_SIZE);

pa.deallocate(ptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage);
}

/**
* Deallocation of a block that was allocated within the windowSize.
* With the current logic, PoolAllocator::deallocate does nothing for "small" blocks.
* The main check is that the code does not crash and does not change memUsage.
*/
TEST(PoolAllocatorTest, DeallocateSmallBlock)
{
PoolAllocator pa;
const uint64_t ALLOC_SIZE = 128;

uint64_t initialUsage = pa.getMemUsage();
void* ptr = pa.allocate(ALLOC_SIZE);
ASSERT_NE(ptr, nullptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage + ALLOC_SIZE);

// Attempting to deallocate a "small" block: the current implementation
// does not return it to the pool, so memUsage will not decrease.
pa.deallocate(ptr);
EXPECT_EQ(pa.getMemUsage(), initialUsage + ALLOC_SIZE);
}

/**
* Full memory release (deallocateAll):
* - Allocate several blocks, both small and large.
* - After calling deallocateAll everything must be released and memUsage returns to 0.
*/
TEST(PoolAllocatorTest, DeallocateAll)
{
PoolAllocator pa;
// A block within windowSize
const uint64_t SMALL_BLOCK = 256;
// An Out-Of-Band block
const uint64_t LARGE_BLOCK = pa.getWindowSize() + 1024;

pa.allocate(SMALL_BLOCK);
pa.allocate(LARGE_BLOCK);
// Make sure memUsage > 0
EXPECT_GT(pa.getMemUsage(), 0ULL);

// Release everything
pa.deallocateAll();
EXPECT_EQ(pa.getMemUsage(), 0ULL);
}
/**
* Copy-assignment operator:
* - Check that the parameters (allocSize, tmpSpace, useLock) are copied.
* - The allocated memory, however, is not carried over (operator= calls deallocateAll).
*/
TEST(PoolAllocatorTest, AssignmentOperator)
{
PoolAllocator pa1(2048, true, true); // windowSize=2048, tmpSpace=true, useLock=true
// Allocate a bit of memory
pa1.allocate(100);
pa1.allocate(200);

EXPECT_EQ(pa1.getWindowSize(), 2048U);
EXPECT_TRUE(pa1.getMemUsage() > 0);

// Assign with the copy-assignment operator: pa2 = pa1
PoolAllocator pa2;
pa2 = pa1; // after this, deallocateAll() has been called inside operator= (in our code)

// Check the copied fields:
EXPECT_EQ(pa2.getWindowSize(), 2048U);
// tmpSpace and useLock should match as well
// (this code has no getters for them; add the corresponding
// methods if they ever need to be verified directly.)

// Check that pa2 reports memUsage == 0 after deallocateAll
EXPECT_EQ(pa2.getMemUsage(), 0ULL);
// pa1 keeps its previous memory-usage statistics,
// because operator= ran deallocateAll only inside pa2.
EXPECT_TRUE(pa1.getMemUsage() > 0);
}

TEST(PoolAllocatorTest, MultithreadedAllocationWithLock)
{
PoolAllocator pa(PoolAllocator::DEFAULT_WINDOW_SIZE, false, true);
// useLock = true

const int THREAD_COUNT = 4;
const uint64_t ALLOC_PER_THREAD = 1024;
std::vector<std::thread> threads;

// Starting value
uint64_t initialUsage = pa.getMemUsage();

// Start several threads, each performing a small number of allocations
for (int i = 0; i < THREAD_COUNT; i++)
{
threads.emplace_back(
[&pa]()
{
for (int j = 0; j < 10; j++)
{
pa.allocate(ALLOC_PER_THREAD);
}
});
}

for (auto& th : threads)
th.join();

uint64_t expected = initialUsage + THREAD_COUNT * 10ULL * ALLOC_PER_THREAD;
EXPECT_GE(pa.getMemUsage(), expected);
}
static const constexpr int64_t MemoryAllowance = 10 * 1024 * 1024;

// Test fixture for the counting-allocator-backed PoolAllocator
class PoolallocatorTest : public ::testing::Test
{
 protected:
// Atomic counter to track allocated memory
std::atomic<int64_t> allocatedMemory{MemoryAllowance};

// Custom allocator instance
CountingAllocator<PoolAllocatorBufType> allocator;

// Constructor
PoolallocatorTest() : allocatedMemory(MemoryAllowance), allocator(&allocatedMemory, MemoryAllowance / 100)
{
}

// Destructor
~PoolallocatorTest() override = default;
};

// Test that memory-consumption accounting works in PoolAllocator.
TEST_F(PoolallocatorTest, AllocationWithAccounting)
{
int bufSize = 512;
const unsigned CUSTOM_SIZE = 1024;
PoolAllocator pa(allocator, CUSTOM_SIZE, false, true);
EXPECT_EQ(pa.getWindowSize(), CUSTOM_SIZE);
EXPECT_EQ(pa.getMemUsage(), 0ULL);
auto* ptr = pa.allocate(bufSize);

EXPECT_NE(ptr, nullptr);
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - bufSize);
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - CUSTOM_SIZE);
pa.deallocate(ptr);
// Because this PoolAllocator frees its memory only when it is destroyed.
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - bufSize);
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - CUSTOM_SIZE);

bufSize = 64536;
auto* ptr1 = pa.allocate(bufSize);

EXPECT_NE(ptr1, nullptr);
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - bufSize);

pa.deallocate(ptr1);
EXPECT_LE(allocatedMemory.load(), MemoryAllowance - CUSTOM_SIZE);
EXPECT_GE(allocatedMemory.load(), MemoryAllowance - bufSize);
}

TEST_F(PoolallocatorTest, MultithreadedAccountedAllocationWithLock)
{
const unsigned CUSTOM_SIZE = 1024;
PoolAllocator pa(allocator, CUSTOM_SIZE, false, true);

const int THREAD_COUNT = 4;
const uint64_t ALLOC_PER_THREAD = 1024;
const uint64_t NUM_ALLOCS_PER_THREAD = 10;
std::vector<std::thread> threads;

// Starting value
uint64_t initialUsage = pa.getMemUsage();

// Start several threads, each performing a small number of allocations
for (int i = 0; i < THREAD_COUNT; i++)
{
threads.emplace_back(
[&pa]()
{
for (uint64_t j = 0; j < NUM_ALLOCS_PER_THREAD; j++)
{
pa.allocate(ALLOC_PER_THREAD);
}
});
}

for (auto& th : threads)
th.join();

uint64_t expected = initialUsage + THREAD_COUNT * 10ULL * ALLOC_PER_THREAD;
EXPECT_GE(pa.getMemUsage(), expected);
// The 2 * CUSTOM_SIZE term allows for the allocator's internal bookkeeping overhead.
EXPECT_GE(allocatedMemory.load(),
MemoryAllowance - (THREAD_COUNT * ALLOC_PER_THREAD * NUM_ALLOCS_PER_THREAD) - 2 * CUSTOM_SIZE);
}
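A quick sanity check on the bound the multithreaded tests assert (an illustrative note, not part of the file): four threads each perform ten 1024-byte allocations, so the pool must report at least that much usage, while the external counter may additionally drop by up to two windows of bookkeeping.

// Worked numbers behind the EXPECT_GE checks above:
//   user data     : THREAD_COUNT * NUM_ALLOCS_PER_THREAD * ALLOC_PER_THREAD = 4 * 10 * 1024
//   counter slack : up to 2 * CUSTOM_SIZE = 2048 bytes of window bookkeeping
static_assert(4 * 10 * 1024 == 40960, "expected minimum pool usage in bytes");
static_assert(2 * 1024 == 2048, "allowance for window bookkeeping in bytes");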
@@ -380,42 +380,31 @@ class RGDataTest : public ::testing::Test
// bool useStringTable = true;
TEST_F(RGDataTest, AllocData)
{
std::cout << " test allocatedMemery " << allocatedMemory.load() << " rowsize " << rg.getRowSize() << " " << rg.getMaxDataSize() << std::endl;
rgD = rowgroup::RGData(rg, alloc);
rg.setData(&rgD);
rg.initRow(&r);
rg.getRow(0, &r);
std::cout << " test inStringTable(colIndex) " << r.inStringTable(0) << std::endl;

std::cout << " test allocatedMemery " << allocatedMemory.load() << std::endl;
auto currentAllocation = allocatedMemory.load();
EXPECT_LE(currentAllocation, MemoryAllowance - rg.getMaxDataSize());

r.setStringField(utils::ConstString{"testaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, 0);
std::cout << " test allocatedMemery " << allocatedMemory.load() << std::endl;
std::cout << " test inStringTable " << r.getColumnWidth(0) << std::endl;
EXPECT_LE(allocatedMemory.load(), currentAllocation);

currentAllocation = allocatedMemory.load();
r.nextRow();
r.setStringField(utils::ConstString{"testaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, 0);
std::cout << " test allocatedMemery " << allocatedMemory.load() << std::endl;
std::cout << " test inStringTable " << r.getColumnWidth(0) << std::endl;
EXPECT_EQ(allocatedMemory.load(), currentAllocation);

currentAllocation = allocatedMemory.load();
r.nextRow();
std::string longString(64 * 1024 + 1000, 'a');
auto cs = utils::ConstString(longString);
std::cout << "test longString " << longString.size() << " cs len " << cs.length()<< std::endl;

r.setStringField(cs, 0);
std::cout << " test allocatedMemery " << allocatedMemory.load() << std::endl;
std::cout << " test inStringTable " << r.getColumnWidth(0) << std::endl;
EXPECT_LE(allocatedMemory.load(), currentAllocation);

rgD = rowgroup::RGData(rg);
std::cout << " test allocatedMemery " << allocatedMemory.load() << std::endl;

EXPECT_EQ(allocatedMemory.load(), MemoryAllowance);

@@ -21,21 +21,13 @@
******************************************************************************************/

// This is one of the first files we compile, check the compiler...
#if defined(__GNUC__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
#error "This is a very old GCC, and it's probably not going to work."
#endif
#else
#error "This compiler is not known and it's probably not going to work."
#endif

#include <stdint.h>
#include <iostream>
#include <memory>

#define FIXEDALLOCATOR_DLLEXPORT
#include "fixedallocator.h"
#undef FIXEDALLOCATOR_DLLEXPORT

#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>

using namespace std;

@@ -75,15 +67,24 @@ void FixedAllocator::setAllocSize(uint allocSize)

void FixedAllocator::newBlock()
{
std::shared_ptr<uint8_t[]> next;
// boost::shared_ptr<FixedAllocatorBufType> next;

capacityRemaining = elementCount * elementSize;

if (!tmpSpace || mem.size() == 0)
{
next.reset(new uint8_t[elementCount * elementSize]);
mem.push_back(next);
nextAlloc = next.get();
if (alloc)
{
mem.emplace_back(boost::allocate_shared<FixedAllocatorBufType>(*alloc, elementCount * elementSize));
}
else
{
mem.emplace_back(boost::make_shared<FixedAllocatorBufType>(elementCount * elementSize));
}
// next.reset(new uint8_t[elementCount * elementSize]);
// mem.push_back(next);
// nextAlloc = next.get();
nextAlloc = mem.back().get();
}
else
{
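For context, an illustrative sketch (not part of the diff): boost::allocate_shared for array types, which newBlock() now relies on, routes both the buffer and the shared-count control block through the supplied allocator, so a CountingAllocator observes every byte the pool requests; boost::make_shared is the uncounted fallback. Here std::allocator stands in for the project's counting allocator.

#include <cstdint>
#include <memory>
#include <boost/shared_ptr.hpp>
#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>

int main()
{
  using Buf = uint8_t[];                      // same element type that FixedAllocatorBufType aliases
  std::allocator<uint8_t> a;                  // stand-in for allocators::CountingAllocator
  boost::shared_ptr<Buf> counted = boost::allocate_shared<Buf>(a, 4096);  // allocation goes through 'a'
  boost::shared_ptr<Buf> plain = boost::make_shared<Buf>(4096);           // plain heap allocation
  counted[0] = plain[0] = 0;                  // both behave as value-initialized byte arrays
  return 0;
}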
@@ -34,16 +34,23 @@

#include <stdint.h>

#include <optional>
#include <vector>
#include <limits>
#include <unistd.h>
#include <atomic>

#include <boost/smart_ptr/allocate_shared_array.hpp>

#include "countingallocator.h"
#include "spinlock.h"

#define EXPORT

namespace utils
{
using FixedAllocatorBufIntegralType = uint8_t;
using FixedAllocatorBufType = FixedAllocatorBufIntegralType[];

class FixedAllocator
{
 public:
@@ -60,7 +67,8 @@ class FixedAllocator
, lock(false)
{
}
EXPORT explicit FixedAllocator(unsigned long allocSize, bool isTmpSpace = false,

EXPORT explicit FixedAllocator(allocators::CountingAllocator<FixedAllocatorBufType> alloc, unsigned long allocSize, bool isTmpSpace = false,
unsigned long numElements = DEFAULT_NUM_ELEMENTS)
: capacityRemaining(0)
, elementCount(numElements)
@@ -70,8 +78,10 @@ class FixedAllocator
, nextAlloc(0)
, useLock(false)
, lock(false)
, alloc(alloc)
{
}

EXPORT FixedAllocator(const FixedAllocator&);
EXPORT FixedAllocator& operator=(const FixedAllocator&);
virtual ~FixedAllocator()
@@ -88,20 +98,21 @@ class FixedAllocator
EXPORT void deallocateAll(); // drops all memory in use
EXPORT uint64_t getMemUsage() const;
void setUseLock(bool);
void setAllocSize(uint);
void setAllocSize(uint32_t);

 private:
void newBlock();

std::vector<std::shared_ptr<uint8_t[]>> mem;
std::vector<boost::shared_ptr<FixedAllocatorBufType>> mem;
unsigned long capacityRemaining;
uint64_t elementCount;
unsigned long elementSize;
uint64_t currentlyStored;
bool tmpSpace;
uint8_t* nextAlloc;
FixedAllocatorBufIntegralType* nextAlloc;
bool useLock;
std::atomic<bool> lock;
std::optional<allocators::CountingAllocator<FixedAllocatorBufType>> alloc {};
};

inline void* FixedAllocator::allocate()
@@ -23,7 +23,8 @@
#include <iostream>
//#define NDEBUG
#include <cassert>

#include <boost/smart_ptr/allocate_shared_array.hpp>
#include <boost/smart_ptr/make_shared_array.hpp>

#include "poolallocator.h"

@@ -37,6 +38,7 @@ PoolAllocator& PoolAllocator::operator=(const PoolAllocator& v)
allocSize = v.allocSize;
tmpSpace = v.tmpSpace;
useLock = v.useLock;
alloc = v.alloc;
deallocateAll();
return *this;
}
@@ -46,21 +48,29 @@ void PoolAllocator::deallocateAll()
capacityRemaining = 0;
nextAlloc = NULL;
memUsage = 0;
// WIP double check the space is cleaned up.
mem.clear();
oob.clear();
}

void PoolAllocator::newBlock()
{
std::shared_ptr<PoolAllocatorBufType[]> next;
// boost::shared_ptr<PoolAllocatorBufType[]> next;

capacityRemaining = allocSize;

if (!tmpSpace || mem.size() == 0)
{
next.reset(new PoolAllocatorBufType[allocSize]);
mem.push_back(next);
nextAlloc = next.get();
if (alloc)
{
mem.emplace_back(boost::allocate_shared<PoolAllocatorBufType>(*alloc, allocSize));
}
else
{
mem.emplace_back(boost::make_shared<PoolAllocatorBufType>(allocSize));
}
// mem.push_back(next);
nextAlloc = mem.back().get();
}
else
nextAlloc = mem.front().get();
@@ -71,7 +81,14 @@ void* PoolAllocator::allocOOB(uint64_t size)
OOBMemInfo memInfo;

memUsage += size;
memInfo.mem.reset(new PoolAllocatorBufType[size]);
if (alloc)
{
memInfo.mem = boost::allocate_shared<PoolAllocatorBufType>(*alloc, size);
}
else
{
memInfo.mem = boost::make_shared<PoolAllocatorBufType>(size);
}
memInfo.size = size;
void* ret = (void*)memInfo.mem.get();
oob[ret] = memInfo;
@@ -27,17 +27,21 @@

#include <unistd.h>
#include <stdint.h>
#include <optional>
#include <vector>
#include <map>
#include <memory>

#include <boost/smart_ptr/allocate_shared_array.hpp>

#include <atomic>

#include "countingallocator.h"

namespace utils
{
using PoolAllocatorBufType = uint8_t;
using PoolAllocatorBufIntegralType = uint8_t;
using PoolAllocatorBufType = PoolAllocatorBufIntegralType[];
class PoolAllocator
{
 public:
@@ -54,7 +58,7 @@ class PoolAllocator
, lock(false)
{
}
PoolAllocator(allocators::CountingAllocator<PoolAllocatorBufType>* allocator, unsigned windowSize = DEFAULT_WINDOW_SIZE,
PoolAllocator(allocators::CountingAllocator<PoolAllocatorBufType> alloc, unsigned windowSize = DEFAULT_WINDOW_SIZE,
bool isTmpSpace = false, bool _useLock = false)
: allocSize(windowSize)
, tmpSpace(isTmpSpace)
@@ -63,7 +67,7 @@ class PoolAllocator
, nextAlloc(0)
, useLock(_useLock)
, lock(false)
, allocator(allocator)
, alloc(alloc)
{
}
PoolAllocator(const PoolAllocator& p)
@@ -74,7 +78,7 @@ class PoolAllocator
, nextAlloc(0)
, useLock(p.useLock)
, lock(false)
, allocator(p.allocator)
, alloc(p.alloc)
{
}
virtual ~PoolAllocator()
@@ -106,23 +110,22 @@ class PoolAllocator
void* allocOOB(uint64_t size);

unsigned allocSize;
std::vector<std::shared_ptr<PoolAllocatorBufType[]>> mem;
std::vector<boost::shared_ptr<PoolAllocatorBufType>> mem;
bool tmpSpace;
unsigned capacityRemaining;
uint64_t memUsage;
PoolAllocatorBufType* nextAlloc;
PoolAllocatorBufIntegralType* nextAlloc;
bool useLock;
std::atomic<bool> lock;

struct OOBMemInfo
{
std::shared_ptr<PoolAllocatorBufType[]> mem;
boost::shared_ptr<PoolAllocatorBufType> mem;
uint64_t size;
};
typedef std::map<void*, OOBMemInfo> OutOfBandMap;
OutOfBandMap oob; // for mem chunks bigger than the window size; these can be dealloc'd
// WIP rename to allocator
allocators::CountingAllocator<PoolAllocatorBufType>* allocator = nullptr;
std::optional<allocators::CountingAllocator<PoolAllocatorBufType>> alloc {};
};

inline void* PoolAllocator::allocate(uint64_t size)
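An illustrative note (not part of the diff) on what the two accounting views report, following the behaviour exercised in tests/poolallocator.cpp; the counting-allocator constructor arguments mirror those used there.

#include <atomic>
#include "countingallocator.h"
#include "poolallocator.h"

void accountingGranularitySketch()
{
  constexpr int64_t kBudget = 10 * 1024 * 1024;
  std::atomic<int64_t> counter{kBudget};
  allocators::CountingAllocator<utils::PoolAllocatorBufType> alloc(&counter, kBudget / 100);
  utils::PoolAllocator pa(alloc, 1024, false, true);   // 1 KiB window

  void* p = pa.allocate(512);
  (void)p;
  // pa.getMemUsage() reports the 512 bytes handed out, while 'counter' has
  // dropped by at least one full 1 KiB window, because the pool acquires
  // whole windows through the counting allocator.
}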
@@ -25,6 +25,7 @@
#include <memory>
#include <boost/shared_ptr.hpp>
#include "poolallocator.h"
#include "resourcemanager.h"

#undef min
#undef max
@@ -61,6 +62,7 @@ class STLPoolAllocator
};

STLPoolAllocator() throw();
STLPoolAllocator(joblist::ResourceManager* rm);
STLPoolAllocator(const STLPoolAllocator&) throw();
STLPoolAllocator(uint32_t capacity) throw();
template <class U>
@@ -94,6 +96,20 @@ STLPoolAllocator<T>::STLPoolAllocator() throw()
pa.reset(new PoolAllocator(DEFAULT_SIZE));
}

template <class T>
STLPoolAllocator<T>::STLPoolAllocator(joblist::ResourceManager* rm)
{
if (rm)
{
auto alloc = rm->getAllocator<PoolAllocatorBufType>();
pa.reset(new PoolAllocator(alloc));
}
else
{
pa.reset(new PoolAllocator(DEFAULT_SIZE));
}
}

template <class T>
STLPoolAllocator<T>::STLPoolAllocator(const STLPoolAllocator<T>& s) throw()
{
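For illustration (not part of the diff): how the new ResourceManager-aware STLPoolAllocator is used downstream, modeled on the hash tables in TupleJoiner. The utils:: namespace and the "stlpoolallocator.h" header name are assumptions; the ResourceManager* is assumed to come from the surrounding job step and may be null, in which case the allocator falls back to an unaccounted pool.

#include <cstdint>
#include <functional>
#include <unordered_map>
#include <utility>
#include "resourcemanager.h"
#include "stlpoolallocator.h"

void joinerMapSketch(joblist::ResourceManager* rm)
{
  using Entry = std::pair<const int64_t, uint8_t*>;
  utils::STLPoolAllocator<Entry> alloc(rm);   // new ctor: pool backed by an RM-provided counting allocator

  std::unordered_map<int64_t, uint8_t*, std::hash<int64_t>, std::equal_to<int64_t>,
                     utils::STLPoolAllocator<Entry>>
      table(10, std::hash<int64_t>(), std::equal_to<int64_t>(), alloc);

  table[42] = nullptr;                        // node allocations come from the shared PoolAllocator
}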
@@ -24,6 +24,7 @@

#include "hasher.h"
#include "lbidlist.h"
#include "resourcemanager.h"
#include "spinlock.h"
#include "vlarray.h"
#include "threadnaming.h"
@@ -36,10 +37,17 @@ using namespace joblist;

namespace joiner
{
// TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// uint32_t smallJoinColumn, uint32_t largeJoinColumn, JoinType jt,
// threadpool::ThreadPool* jsThreadPool)
// : TupleJoiner(smallInput, largeInput, smallJoinColumn, largeJoinColumn, jt, jsThreadPool, nullptr)
// {
// }

// Typed joiner ctor
TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
uint32_t smallJoinColumn, uint32_t largeJoinColumn, JoinType jt,
threadpool::ThreadPool* jsThreadPool, const uint64_t numCores)
threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores)
: smallRG(smallInput)
, largeRG(largeInput)
, joinAlg(INSERTING)
@@ -64,7 +72,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const long double, Row::Pointer>> alloc;
STLPoolAllocator<pair<const long double, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
ld[i].reset(new ldhash_t(10, hasher(), ldhash_t::key_equal(), alloc));
}
@@ -75,7 +83,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const int64_t, Row::Pointer>> alloc;
STLPoolAllocator<pair<const int64_t, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
sth[i].reset(new sthash_t(10, hasher(), sthash_t::key_equal(), alloc));
}
@@ -86,7 +94,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
_pool.reset(new boost::shared_ptr<PoolAllocator>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const int64_t, uint8_t*>> alloc;
STLPoolAllocator<pair<const int64_t, uint8_t*>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
h[i].reset(new hash_t(10, hasher(), hash_t::key_equal(), alloc));
}
@@ -143,10 +151,17 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
nullValueForJoinColumn = smallNullRow.getSignedNullValue(smallJoinColumn);
}

// TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// const vector<uint32_t>& smallJoinColumns, const vector<uint32_t>& largeJoinColumns,
// JoinType jt, threadpool::ThreadPool* jsThreadPool)
// : TupleJoiner(smallInput, largeInput, smallJoinColumns, largeJoinColumns, jt, jsThreadPool, nullptr)
// {
// }

// Typeless joiner ctor
TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
const vector<uint32_t>& smallJoinColumns, const vector<uint32_t>& largeJoinColumns,
JoinType jt, threadpool::ThreadPool* jsThreadPool, const uint64_t numCores)
JoinType jt, threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores)
: smallRG(smallInput)
, largeRG(largeInput)
, joinAlg(INSERTING)
@@ -170,7 +185,7 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
ht.reset(new boost::scoped_ptr<typelesshash_t>[bucketCount]);
for (i = 0; i < bucketCount; i++)
{
STLPoolAllocator<pair<const TypelessData, Row::Pointer>> alloc;
STLPoolAllocator<pair<const TypelessData, Row::Pointer>> alloc(resourceManager_);
_pool[i] = alloc.getPoolAllocator();
ht[i].reset(new typelesshash_t(10, hasher(), typelesshash_t::key_equal(), alloc));
}
@@ -226,7 +241,10 @@ TupleJoiner::TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::R
// TODO: make it explicit to avoid future confusion.
storedKeyAlloc.reset(new FixedAllocator[numCores]);
for (i = 0; i < (uint)numCores; i++)
storedKeyAlloc[i].setAllocSize(keyLength);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
storedKeyAlloc[i] = FixedAllocator(alloc, keyLength);
}
}

TupleJoiner::TupleJoiner()
@@ -856,7 +874,11 @@ void TupleJoiner::setInUM()
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);

for (i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);

}
}
}

@@ -911,7 +933,10 @@ void TupleJoiner::setInUM(vector<RGData>& rgs)
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);

for (i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);
}
}
}

@@ -967,7 +992,10 @@ void TupleJoiner::setThreadCount(uint32_t cnt)
tmpKeyAlloc.reset(new FixedAllocator[threadCount]);

for (uint32_t i = 0; i < threadCount; i++)
tmpKeyAlloc[i] = FixedAllocator(keyLength, true);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
tmpKeyAlloc[i] = FixedAllocator(alloc, keyLength, true);
}
}

if (fe)
@@ -1839,6 +1867,7 @@ std::shared_ptr<TupleJoiner> TupleJoiner::copyForDiskJoin()

ret->discreteValues.reset(new bool[smallKeyColumns.size()]);
ret->cpValues.reset(new vector<int128_t>[smallKeyColumns.size()]);
ret->resourceManager_ = resourceManager_;

for (uint32_t i = 0; i < smallKeyColumns.size(); i++)
{
@@ -1877,7 +1906,10 @@ std::shared_ptr<TupleJoiner> TupleJoiner::copyForDiskJoin()
{
ret->storedKeyAlloc.reset(new FixedAllocator[numCores]);
for (int i = 0; i < numCores; i++)
ret->storedKeyAlloc[i].setAllocSize(keyLength);
{
auto alloc = resourceManager_->getAllocator<utils::FixedAllocatorBufType>();
storedKeyAlloc[i] = FixedAllocator(alloc, keyLength);
}
}

ret->numCores = numCores;
@@ -26,6 +26,7 @@
#include <boost/scoped_array.hpp>
#include <unordered_map>

#include "resourcemanager.h"
#include "rowgroup.h"
#include "joiner.h"
#include "fixedallocator.h"
@@ -266,14 +267,22 @@ class TupleJoiner
};

/* ctor to use for numeric join */
// TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// uint32_t smallJoinColumn, uint32_t largeJoinColumn, joblist::JoinType jt,
// threadpool::ThreadPool* jsThreadPool);

TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
uint32_t smallJoinColumn, uint32_t largeJoinColumn, joblist::JoinType jt,
threadpool::ThreadPool* jsThreadPool, const uint64_t numCores);
threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores);

/* ctor to use for string & compound join */
TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
// const std::vector<uint32_t>& smallJoinColumns, const std::vector<uint32_t>& largeJoinColumns,
// joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool);

TupleJoiner(const rowgroup::RowGroup& smallInput, const rowgroup::RowGroup& largeInput,
const std::vector<uint32_t>& smallJoinColumns, const std::vector<uint32_t>& largeJoinColumns,
joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool, const uint64_t numCores);
joblist::JoinType jt, threadpool::ThreadPool* jsThreadPool, joblist::ResourceManager* rm, const uint64_t numCores);

~TupleJoiner();

@@ -562,6 +571,8 @@ class TupleJoiner
void bucketsToTables(buckets_t*, hash_table_t*);

bool _convertToDiskJoin;
joblist::ResourceManager* resourceManager_ = nullptr;

};

} // namespace joiner