You've already forked mariadb-columnstore-engine
mirror of
https://github.com/mariadb-corporation/mariadb-columnstore-engine.git
synced 2025-08-01 06:46:55 +03:00
Merge branch 'develop' into MCOL-4841
This commit is contained in:
@ -156,7 +156,6 @@ local Pipeline(branch, platform, event, arch='amd64') = {
|
||||
mtr:: {
|
||||
name: 'mtr',
|
||||
image: 'docker:git',
|
||||
[if arch == 'arm64' then 'failure']: 'ignore',
|
||||
volumes: [pipeline._volumes.docker],
|
||||
commands: [
|
||||
'docker run --volume /sys/fs/cgroup:/sys/fs/cgroup:ro --env MYSQL_TEST_DIR=' + mtr_path + ' --env DEBIAN_FRONTEND=noninteractive --env MCS_USE_S3_STORAGE=0 --name mtr$${DRONE_BUILD_NUMBER} --privileged --detach ' + img + ' ' + init + ' --unit=basic.target',
|
||||
|
@ -1274,6 +1274,10 @@ void BatchPrimitiveProcessorJL::runBPP(ByteStream& bs, uint32_t pmNum)
|
||||
bs << uniqueID;
|
||||
bs << _priority;
|
||||
|
||||
// The weight is used by PrimProc thread pool algo
|
||||
uint32_t weight = calculateBPPWeight();
|
||||
bs << weight;
|
||||
|
||||
bs << dbRoot;
|
||||
bs << count;
|
||||
|
||||
|
@ -252,8 +252,27 @@ class BatchPrimitiveProcessorJL
|
||||
}
|
||||
|
||||
private:
|
||||
// void setLBIDForScan(uint64_t rid, uint32_t dbroot);
|
||||
const size_t perColumnProjectWeight_ = 10;
|
||||
const size_t perColumnFilteringWeight_ = 10;
|
||||
const size_t fe1Weight_ = 10;
|
||||
const size_t fe2Weight_ = 10;
|
||||
const size_t joinWeight_ = 500;
|
||||
const size_t aggregationWeight_ = 500;
|
||||
|
||||
// This is simple SQL operations-based model leveraged by
|
||||
// FairThreadPool run by PP facility.
|
||||
// Every operation mentioned in this calculation spends
|
||||
// some CPU so the morsel uses this op weights more.
|
||||
uint32_t calculateBPPWeight() const
|
||||
{
|
||||
uint32_t weight = perColumnProjectWeight_ * projectCount;
|
||||
weight += filterCount * perColumnFilteringWeight_;
|
||||
weight += tJoiners.size() * joinWeight_;
|
||||
weight += (aggregatorPM) ? aggregationWeight_ : 0;
|
||||
weight += (fe1) ? fe1Weight_ : 0;
|
||||
weight += (fe2) ? fe2Weight_ : 0;
|
||||
return weight;
|
||||
}
|
||||
BPSOutputType ot;
|
||||
|
||||
bool needToSetLBID;
|
||||
|
@ -20,7 +20,6 @@
|
||||
#include "tuplehashjoin.h"
|
||||
#include "joinpartition.h"
|
||||
#include "threadnaming.h"
|
||||
#include "../../utils/threadpool/prioritythreadpool.h"
|
||||
|
||||
#pragma once
|
||||
|
||||
|
@ -1478,7 +1478,7 @@ void TupleAggregateStep::prep1PhaseAggregate(JobInfo& jobInfo, vector<RowGroup>&
|
||||
|
||||
functionVec[i]->fAuxColumnIndex = lastCol;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAgg.push_back(oidsProj[j]);
|
||||
keysAgg.push_back(keysProj[j]);
|
||||
scaleAgg.push_back(0);
|
||||
@ -1488,7 +1488,7 @@ void TupleAggregateStep::prep1PhaseAggregate(JobInfo& jobInfo, vector<RowGroup>&
|
||||
widthAgg.push_back(sizeof(long double));
|
||||
++lastCol;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAgg.push_back(oidsProj[j]);
|
||||
keysAgg.push_back(keysProj[j]);
|
||||
scaleAgg.push_back(0);
|
||||
@ -1910,7 +1910,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(JobInfo& jobInfo, vector<Ro
|
||||
widthAgg.push_back(sizeof(double));
|
||||
funct->fAuxColumnIndex = ++colAgg;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAgg.push_back(oidsProj[colProj]);
|
||||
keysAgg.push_back(aggKey);
|
||||
scaleAgg.push_back(0);
|
||||
@ -1920,7 +1920,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(JobInfo& jobInfo, vector<Ro
|
||||
widthAgg.push_back(sizeof(long double));
|
||||
++colAgg;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAgg.push_back(oidsProj[colProj]);
|
||||
keysAgg.push_back(aggKey);
|
||||
scaleAgg.push_back(0);
|
||||
@ -2581,7 +2581,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(JobInfo& jobInfo, vector<Ro
|
||||
|
||||
functionVec2[i]->fAuxColumnIndex = lastCol;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAggDist.push_back(oidsAgg[j]);
|
||||
keysAggDist.push_back(keysAgg[j]);
|
||||
scaleAggDist.push_back(0);
|
||||
@ -2591,7 +2591,7 @@ void TupleAggregateStep::prep1PhaseDistinctAggregate(JobInfo& jobInfo, vector<Ro
|
||||
widthAggDist.push_back(sizeof(long double));
|
||||
++lastCol;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAggDist.push_back(oidsAgg[j]);
|
||||
keysAggDist.push_back(keysAgg[j]);
|
||||
scaleAggDist.push_back(0);
|
||||
@ -3243,7 +3243,7 @@ void TupleAggregateStep::prep2PhasesAggregate(JobInfo& jobInfo, vector<RowGroup>
|
||||
widthAggPm.push_back(sizeof(double));
|
||||
funct->fAuxColumnIndex = ++colAggPm;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAggPm.push_back(oidsProj[colProj]);
|
||||
keysAggPm.push_back(aggKey);
|
||||
scaleAggPm.push_back(0);
|
||||
@ -3253,7 +3253,7 @@ void TupleAggregateStep::prep2PhasesAggregate(JobInfo& jobInfo, vector<RowGroup>
|
||||
widthAggPm.push_back(sizeof(long double));
|
||||
++colAggPm;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAggPm.push_back(oidsProj[colProj]);
|
||||
keysAggPm.push_back(aggKey);
|
||||
scaleAggPm.push_back(0);
|
||||
@ -3701,7 +3701,7 @@ void TupleAggregateStep::prep2PhasesAggregate(JobInfo& jobInfo, vector<RowGroup>
|
||||
|
||||
functionVecUm[i]->fAuxColumnIndex = lastCol;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAggUm.push_back(oidsAggPm[j]);
|
||||
keysAggUm.push_back(keysAggPm[j]);
|
||||
scaleAggUm.push_back(0);
|
||||
@ -3711,7 +3711,7 @@ void TupleAggregateStep::prep2PhasesAggregate(JobInfo& jobInfo, vector<RowGroup>
|
||||
widthAggUm.push_back(sizeof(long double));
|
||||
++lastCol;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAggUm.push_back(oidsAggPm[j]);
|
||||
keysAggUm.push_back(keysAggPm[j]);
|
||||
scaleAggUm.push_back(0);
|
||||
@ -4152,7 +4152,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(JobInfo& jobInfo, vector<R
|
||||
widthAggPm.push_back(sizeof(double));
|
||||
funct->fAuxColumnIndex = ++colAggPm;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAggPm.push_back(oidsProj[colProj]);
|
||||
keysAggPm.push_back(aggKey);
|
||||
scaleAggPm.push_back(0);
|
||||
@ -4162,7 +4162,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(JobInfo& jobInfo, vector<R
|
||||
widthAggPm.push_back(sizeof(long double));
|
||||
++colAggPm;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAggPm.push_back(oidsProj[colProj]);
|
||||
keysAggPm.push_back(aggKey);
|
||||
scaleAggPm.push_back(0);
|
||||
@ -4808,7 +4808,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(JobInfo& jobInfo, vector<R
|
||||
|
||||
functionVecUm[i]->fAuxColumnIndex = lastCol;
|
||||
|
||||
// sum(x)
|
||||
// mean(x)
|
||||
oidsAggDist.push_back(oidsAggPm[j]);
|
||||
keysAggDist.push_back(keysAggPm[j]);
|
||||
scaleAggDist.push_back(0);
|
||||
@ -4818,7 +4818,7 @@ void TupleAggregateStep::prep2PhasesDistinctAggregate(JobInfo& jobInfo, vector<R
|
||||
widthAggDist.push_back(sizeof(long double));
|
||||
++lastCol;
|
||||
|
||||
// sum(x**2)
|
||||
// sum(x_i - mean)^2
|
||||
oidsAggDist.push_back(oidsAggPm[j]);
|
||||
keysAggDist.push_back(keysAggPm[j]);
|
||||
scaleAggDist.push_back(0);
|
||||
|
@ -53,6 +53,8 @@ CREATE OR REPLACE FUNCTION idblocalpm RETURNS INTEGER SONAME 'ha_columnstore.so'
|
||||
CREATE OR REPLACE FUNCTION mcssystemready RETURNS INTEGER SONAME 'ha_columnstore.so';
|
||||
CREATE OR REPLACE FUNCTION mcssystemreadonly RETURNS INTEGER SONAME 'ha_columnstore.so';
|
||||
CREATE OR REPLACE FUNCTION mcssystemprimary RETURNS INTEGER SONAME 'ha_columnstore.so';
|
||||
CREATE OR REPLACE FUNCTION mcs_emindex_size RETURNS INTEGER SONAME 'ha_columnstore.so';
|
||||
CREATE OR REPLACE FUNCTION mcs_emindex_free RETURNS INTEGER SONAME 'ha_columnstore.so';
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION regr_avgx RETURNS REAL SONAME 'libregr_mysql.so';
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION regr_avgy RETURNS REAL SONAME 'libregr_mysql.so';
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION regr_count RETURNS INTEGER SONAME 'libregr_mysql.so';
|
||||
|
217
mysql-test/columnstore/basic/r/std_aggregate_columnstore.result
Normal file
217
mysql-test/columnstore/basic/r/std_aggregate_columnstore.result
Normal file
@ -0,0 +1,217 @@
|
||||
DROP DATABASE IF EXISTS std_test_db;
|
||||
CREATE DATABASE std_test_db;
|
||||
USE std_test_db;
|
||||
create table t1 (
|
||||
col_signed tinyint,
|
||||
col_unsigned tinyint unsigned
|
||||
)engine=columnstore;
|
||||
LOAD DATA LOCAL infile 'MTR_SUITE_DIR/../std_data/tinyint_range.tbl' INTO TABLE t1 FIELDS TERMINATED BY '|';;
|
||||
ALTER TABLE t1 ADD COLUMN col_small_signed SMALLINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_small_unsigned SMALLINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_signed MEDIUMINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_unsigned MEDIUMINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_signed INT;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_unsigned INT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_signed BIGINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_unsigned BIGINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_signed DECIMAL(38,0);
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_unsigned DECIMAL(38,0) UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_signed FLOAT;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_unsigned FLOAT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_signed DOUBLE;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_unsigned DOUBLE UNSIGNED;
|
||||
UPDATE t1 SET col_small_signed=col_signed + sign(col_signed) * 32000;
|
||||
UPDATE t1 SET col_small_unsigned=col_unsigned + 65000;
|
||||
UPDATE t1 SET col_med_signed=col_signed + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_med_unsigned=col_unsigned + 16776000;
|
||||
UPDATE t1 SET col_int_signed=col_signed + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_int_unsigned=col_unsigned + 4294000000;
|
||||
UPDATE t1 SET col_big_signed=col_signed + sign(col_signed) * 9223372036854775000;
|
||||
UPDATE t1 SET col_big_unsigned=col_unsigned + 18446744073709551000;
|
||||
UPDATE t1 SET col_dec_signed=col_signed + sign(col_signed) * 800000000000000000000000000000000001;
|
||||
UPDATE t1 SET col_dec_unsigned=col_unsigned + 800000000000000000000000000000000003;
|
||||
UPDATE t1 SET col_float_signed=col_signed + 0.637 + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_float_unsigned=col_unsigned + 0.637 + 16776000;
|
||||
UPDATE t1 SET col_double_signed=col_signed + 0.637 + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_double_unsigned=col_unsigned + 0.637 + 4294000000;
|
||||
SELECT 'q1', floor(STD(col_signed)) FROM t1;
|
||||
q1 floor(STD(col_signed))
|
||||
q1 73
|
||||
SELECT 'q2', floor(STD(col_unsigned)) FROM t1;
|
||||
q2 floor(STD(col_unsigned))
|
||||
q2 73
|
||||
SELECT 'q3', floor(STD(col_small_signed)) FROM t1;
|
||||
q3 floor(STD(col_small_signed))
|
||||
q3 32000
|
||||
SELECT 'q4', floor(STD(col_small_unsigned)) FROM t1;
|
||||
q4 floor(STD(col_small_unsigned))
|
||||
q4 73
|
||||
SELECT 'q5', floor(STD(col_med_signed)) FROM t1;
|
||||
q5 floor(STD(col_med_signed))
|
||||
q5 8371470
|
||||
SELECT 'q6', floor(STD(col_med_unsigned)) FROM t1;
|
||||
q6 floor(STD(col_med_unsigned))
|
||||
q6 73
|
||||
SELECT 'q7', floor(STD(col_int_signed)) FROM t1;
|
||||
q7 floor(STD(col_int_signed))
|
||||
q7 2143234889
|
||||
SELECT 'q8', floor(STD(col_int_unsigned)) FROM t1;
|
||||
q8 floor(STD(col_int_unsigned))
|
||||
q8 73
|
||||
SELECT 'q9', floor(STD(col_big_signed)) FROM t1;
|
||||
q9 floor(STD(col_big_signed))
|
||||
q9 9205126264421172000
|
||||
SELECT 'q10', floor(STD(col_big_unsigned)) FROM t1;
|
||||
q10 floor(STD(col_big_unsigned))
|
||||
q10 73
|
||||
SELECT 'q11', floor(STD(col_dec_signed)) FROM t1;
|
||||
q11 floor(STD(col_dec_signed))
|
||||
q11 798417431511104800000000000000000000
|
||||
SELECT 'q13', floor(STD(col_float_signed)) FROM t1;
|
||||
q13 floor(STD(col_float_signed))
|
||||
q13 8371470
|
||||
SELECT 'q14', floor(STD(col_float_unsigned)) FROM t1;
|
||||
q14 floor(STD(col_float_unsigned))
|
||||
q14 73
|
||||
SELECT 'q15', floor(STD(col_double_signed)) FROM t1;
|
||||
q15 floor(STD(col_double_signed))
|
||||
q15 2143234889
|
||||
SELECT 'q16', floor(STD(col_double_unsigned)) FROM t1;
|
||||
q16 floor(STD(col_double_unsigned))
|
||||
q16 73
|
||||
SELECT 'q17', floor(STDDEV_SAMP(col_signed)) FROM t1;
|
||||
q17 floor(STDDEV_SAMP(col_signed))
|
||||
q17 73
|
||||
SELECT 'q18', floor(STDDEV_SAMP(col_unsigned)) FROM t1;
|
||||
q18 floor(STDDEV_SAMP(col_unsigned))
|
||||
q18 73
|
||||
SELECT 'q19', floor(STDDEV_SAMP(col_small_signed)) FROM t1;
|
||||
q19 floor(STDDEV_SAMP(col_small_signed))
|
||||
q19 32063
|
||||
SELECT 'q20', floor(STDDEV_SAMP(col_small_unsigned)) FROM t1;
|
||||
q20 floor(STDDEV_SAMP(col_small_unsigned))
|
||||
q20 73
|
||||
SELECT 'q21', floor(STDDEV_SAMP(col_med_signed)) FROM t1;
|
||||
q21 floor(STDDEV_SAMP(col_med_signed))
|
||||
q21 8387998
|
||||
SELECT 'q22', floor(STDDEV_SAMP(col_med_unsigned)) FROM t1;
|
||||
q22 floor(STDDEV_SAMP(col_med_unsigned))
|
||||
q22 73
|
||||
SELECT 'q23', floor(STDDEV_SAMP(col_int_signed)) FROM t1;
|
||||
q23 floor(STDDEV_SAMP(col_int_signed))
|
||||
q23 2147466354
|
||||
SELECT 'q24', floor(STDDEV_SAMP(col_int_unsigned)) FROM t1;
|
||||
q24 floor(STDDEV_SAMP(col_int_unsigned))
|
||||
q24 73
|
||||
SELECT 'q25', floor(STDDEV_SAMP(col_big_signed)) FROM t1;
|
||||
q25 floor(STDDEV_SAMP(col_big_signed))
|
||||
q25 9223300272764652000
|
||||
SELECT 'q26', floor(STDDEV_SAMP(col_big_unsigned)) FROM t1;
|
||||
q26 floor(STDDEV_SAMP(col_big_unsigned))
|
||||
q26 73
|
||||
SELECT 'q27', floor(STDDEV_SAMP(col_dec_signed)) FROM t1;
|
||||
q27 floor(STDDEV_SAMP(col_dec_signed))
|
||||
q27 799993775457406500000000000000000000
|
||||
SELECT 'q29', floor(STDDEV_SAMP(col_float_signed)) FROM t1;
|
||||
q29 floor(STDDEV_SAMP(col_float_signed))
|
||||
q29 8387998
|
||||
SELECT 'q30', floor(STDDEV_SAMP(col_float_unsigned)) FROM t1;
|
||||
q30 floor(STDDEV_SAMP(col_float_unsigned))
|
||||
q30 73
|
||||
SELECT 'q31', floor(STDDEV_SAMP(col_double_signed)) FROM t1;
|
||||
q31 floor(STDDEV_SAMP(col_double_signed))
|
||||
q31 2147466354
|
||||
SELECT 'q32', floor(STDDEV_SAMP(col_double_unsigned)) FROM t1;
|
||||
q32 floor(STDDEV_SAMP(col_double_unsigned))
|
||||
q32 73
|
||||
SELECT 'q33', floor(VAR_POP(col_signed)) FROM t1;
|
||||
q33 floor(VAR_POP(col_signed))
|
||||
q33 5376
|
||||
SELECT 'q34', floor(VAR_POP(col_unsigned)) FROM t1;
|
||||
q34 floor(VAR_POP(col_unsigned))
|
||||
q34 5376
|
||||
SELECT 'q35', floor(VAR_POP(col_small_signed)) FROM t1;
|
||||
q35 floor(VAR_POP(col_small_signed))
|
||||
q35 1024021882
|
||||
SELECT 'q36', floor(VAR_POP(col_small_unsigned)) FROM t1;
|
||||
q36 floor(VAR_POP(col_small_unsigned))
|
||||
q36 5376
|
||||
SELECT 'q37', floor(VAR_POP(col_med_signed)) FROM t1;
|
||||
q37 floor(VAR_POP(col_med_signed))
|
||||
q37 70081516547007
|
||||
SELECT 'q38', floor(VAR_POP(col_med_unsigned)) FROM t1;
|
||||
q38 floor(VAR_POP(col_med_unsigned))
|
||||
q38 5376
|
||||
SELECT 'q39', floor(VAR_POP(col_int_signed)) FROM t1;
|
||||
q39 floor(VAR_POP(col_int_signed))
|
||||
q39 4593455793567983000
|
||||
SELECT 'q40', floor(VAR_POP(col_int_unsigned)) FROM t1;
|
||||
q40 floor(VAR_POP(col_int_unsigned))
|
||||
q40 5376
|
||||
SELECT 'q41', floor(VAR_POP(col_big_signed)) FROM t1;
|
||||
q41 floor(VAR_POP(col_big_signed))
|
||||
q41 84734349543936475000000000000000000000
|
||||
SELECT 'q42', floor(VAR_POP(col_big_unsigned)) FROM t1;
|
||||
q42 floor(VAR_POP(col_big_unsigned))
|
||||
q42 5376
|
||||
SELECT 'q43', floor(VAR_POP(col_dec_signed)) FROM t1;
|
||||
q43 floor(VAR_POP(col_dec_signed))
|
||||
q43 637470394940789800000000000000000000000000000000000000000000000000000000
|
||||
SELECT 'q45', floor(VAR_POP(col_float_signed)) FROM t1;
|
||||
q45 floor(VAR_POP(col_float_signed))
|
||||
q45 70081516546971
|
||||
SELECT 'q46', floor(VAR_POP(col_float_unsigned)) FROM t1;
|
||||
q46 floor(VAR_POP(col_float_unsigned))
|
||||
q46 5376
|
||||
SELECT 'q47', floor(VAR_POP(col_double_signed)) FROM t1;
|
||||
q47 floor(VAR_POP(col_double_signed))
|
||||
q47 4593455793567983000
|
||||
SELECT 'q48', floor(VAR_POP(col_double_unsigned)) FROM t1;
|
||||
q48 floor(VAR_POP(col_double_unsigned))
|
||||
q48 5376
|
||||
SELECT 'q49', floor(VAR_SAMP(col_signed)) FROM t1;
|
||||
q49 floor(VAR_SAMP(col_signed))
|
||||
q49 5397
|
||||
SELECT 'q50', floor(VAR_SAMP(col_unsigned)) FROM t1;
|
||||
q50 floor(VAR_SAMP(col_unsigned))
|
||||
q50 5397
|
||||
SELECT 'q51', floor(VAR_SAMP(col_small_signed)) FROM t1;
|
||||
q51 floor(VAR_SAMP(col_small_signed))
|
||||
q51 1028069399
|
||||
SELECT 'q52', floor(VAR_SAMP(col_small_unsigned)) FROM t1;
|
||||
q52 floor(VAR_SAMP(col_small_unsigned))
|
||||
q52 5397
|
||||
SELECT 'q53', floor(VAR_SAMP(col_med_signed)) FROM t1;
|
||||
q53 floor(VAR_SAMP(col_med_signed))
|
||||
q53 70358518588695
|
||||
SELECT 'q54', floor(VAR_SAMP(col_med_unsigned)) FROM t1;
|
||||
q54 floor(VAR_SAMP(col_med_unsigned))
|
||||
q54 5397
|
||||
SELECT 'q55', floor(VAR_SAMP(col_int_signed)) FROM t1;
|
||||
q55 floor(VAR_SAMP(col_int_signed))
|
||||
q55 4611611745321216000
|
||||
SELECT 'q56', floor(VAR_SAMP(col_int_unsigned)) FROM t1;
|
||||
q56 floor(VAR_SAMP(col_int_unsigned))
|
||||
q56 5397
|
||||
SELECT 'q57', floor(VAR_SAMP(col_big_signed)) FROM t1;
|
||||
q57 floor(VAR_SAMP(col_big_signed))
|
||||
q57 85069267921580490000000000000000000000
|
||||
SELECT 'q58', floor(VAR_SAMP(col_big_unsigned)) FROM t1;
|
||||
q58 floor(VAR_SAMP(col_big_unsigned))
|
||||
q58 5397
|
||||
SELECT 'q59', floor(VAR_SAMP(col_dec_signed)) FROM t1;
|
||||
q59 floor(VAR_SAMP(col_dec_signed))
|
||||
q59 639990040770595400000000000000000000000000000000000000000000000000000000
|
||||
SELECT 'q61', floor(VAR_SAMP(col_float_signed)) FROM t1;
|
||||
q61 floor(VAR_SAMP(col_float_signed))
|
||||
q61 70358518588659
|
||||
SELECT 'q62', floor(VAR_SAMP(col_float_unsigned)) FROM t1;
|
||||
q62 floor(VAR_SAMP(col_float_unsigned))
|
||||
q62 5397
|
||||
SELECT 'q63', floor(VAR_SAMP(col_double_signed)) FROM t1;
|
||||
q63 floor(VAR_SAMP(col_double_signed))
|
||||
q63 4611611745321216000
|
||||
SELECT 'q64', floor(VAR_SAMP(col_double_unsigned)) FROM t1;
|
||||
q64 floor(VAR_SAMP(col_double_unsigned))
|
||||
q64 5397
|
||||
DROP DATABASE std_test_db;
|
15397
mysql-test/columnstore/basic/r/std_aggregate_window_columnstore.result
Normal file
15397
mysql-test/columnstore/basic/r/std_aggregate_window_columnstore.result
Normal file
File diff suppressed because it is too large
Load Diff
@ -33,7 +33,7 @@ q5 floor(STD(u_custKey))
|
||||
q5 6749
|
||||
SELECT 'q6', floor(STD(u_bigcustKey)) FROM customer;
|
||||
q6 floor(STD(u_bigcustKey))
|
||||
q6 6688
|
||||
q6 6749
|
||||
SELECT 'q7', AVG(u_custKey) FROM customer;
|
||||
q7 AVG(u_custKey)
|
||||
q7 4294007575.1667
|
||||
|
118
mysql-test/columnstore/basic/t/std_aggregate_columnstore.test
Normal file
118
mysql-test/columnstore/basic/t/std_aggregate_columnstore.test
Normal file
@ -0,0 +1,118 @@
|
||||
-- source ../include/have_columnstore.inc
|
||||
|
||||
--disable_warnings
|
||||
DROP DATABASE IF EXISTS std_test_db;
|
||||
--enable_warnings
|
||||
|
||||
CREATE DATABASE std_test_db;
|
||||
USE std_test_db;
|
||||
|
||||
create table t1 (
|
||||
col_signed tinyint,
|
||||
col_unsigned tinyint unsigned
|
||||
)engine=columnstore;
|
||||
|
||||
--replace_result $MTR_SUITE_DIR MTR_SUITE_DIR
|
||||
--eval LOAD DATA LOCAL infile '$MTR_SUITE_DIR/../std_data/tinyint_range.tbl' INTO TABLE t1 FIELDS TERMINATED BY '|';
|
||||
|
||||
ALTER TABLE t1 ADD COLUMN col_small_signed SMALLINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_small_unsigned SMALLINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_signed MEDIUMINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_unsigned MEDIUMINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_signed INT;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_unsigned INT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_signed BIGINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_unsigned BIGINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_signed DECIMAL(38,0);
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_unsigned DECIMAL(38,0) UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_signed FLOAT;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_unsigned FLOAT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_signed DOUBLE;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_unsigned DOUBLE UNSIGNED;
|
||||
|
||||
UPDATE t1 SET col_small_signed=col_signed + sign(col_signed) * 32000;
|
||||
UPDATE t1 SET col_small_unsigned=col_unsigned + 65000;
|
||||
UPDATE t1 SET col_med_signed=col_signed + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_med_unsigned=col_unsigned + 16776000;
|
||||
UPDATE t1 SET col_int_signed=col_signed + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_int_unsigned=col_unsigned + 4294000000;
|
||||
UPDATE t1 SET col_big_signed=col_signed + sign(col_signed) * 9223372036854775000;
|
||||
UPDATE t1 SET col_big_unsigned=col_unsigned + 18446744073709551000;
|
||||
UPDATE t1 SET col_dec_signed=col_signed + sign(col_signed) * 800000000000000000000000000000000001;
|
||||
UPDATE t1 SET col_dec_unsigned=col_unsigned + 800000000000000000000000000000000003;
|
||||
UPDATE t1 SET col_float_signed=col_signed + 0.637 + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_float_unsigned=col_unsigned + 0.637 + 16776000;
|
||||
UPDATE t1 SET col_double_signed=col_signed + 0.637 + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_double_unsigned=col_unsigned + 0.637 + 4294000000;
|
||||
|
||||
|
||||
SELECT 'q1', floor(STD(col_signed)) FROM t1;
|
||||
SELECT 'q2', floor(STD(col_unsigned)) FROM t1;
|
||||
SELECT 'q3', floor(STD(col_small_signed)) FROM t1;
|
||||
SELECT 'q4', floor(STD(col_small_unsigned)) FROM t1;
|
||||
SELECT 'q5', floor(STD(col_med_signed)) FROM t1;
|
||||
SELECT 'q6', floor(STD(col_med_unsigned)) FROM t1;
|
||||
SELECT 'q7', floor(STD(col_int_signed)) FROM t1;
|
||||
SELECT 'q8', floor(STD(col_int_unsigned)) FROM t1;
|
||||
SELECT 'q9', floor(STD(col_big_signed)) FROM t1;
|
||||
SELECT 'q10', floor(STD(col_big_unsigned)) FROM t1;
|
||||
SELECT 'q11', floor(STD(col_dec_signed)) FROM t1;
|
||||
SELECT 'q13', floor(STD(col_float_signed)) FROM t1;
|
||||
SELECT 'q14', floor(STD(col_float_unsigned)) FROM t1;
|
||||
SELECT 'q15', floor(STD(col_double_signed)) FROM t1;
|
||||
SELECT 'q16', floor(STD(col_double_unsigned)) FROM t1;
|
||||
|
||||
|
||||
SELECT 'q17', floor(STDDEV_SAMP(col_signed)) FROM t1;
|
||||
SELECT 'q18', floor(STDDEV_SAMP(col_unsigned)) FROM t1;
|
||||
SELECT 'q19', floor(STDDEV_SAMP(col_small_signed)) FROM t1;
|
||||
SELECT 'q20', floor(STDDEV_SAMP(col_small_unsigned)) FROM t1;
|
||||
SELECT 'q21', floor(STDDEV_SAMP(col_med_signed)) FROM t1;
|
||||
SELECT 'q22', floor(STDDEV_SAMP(col_med_unsigned)) FROM t1;
|
||||
SELECT 'q23', floor(STDDEV_SAMP(col_int_signed)) FROM t1;
|
||||
SELECT 'q24', floor(STDDEV_SAMP(col_int_unsigned)) FROM t1;
|
||||
SELECT 'q25', floor(STDDEV_SAMP(col_big_signed)) FROM t1;
|
||||
SELECT 'q26', floor(STDDEV_SAMP(col_big_unsigned)) FROM t1;
|
||||
SELECT 'q27', floor(STDDEV_SAMP(col_dec_signed)) FROM t1;
|
||||
SELECT 'q29', floor(STDDEV_SAMP(col_float_signed)) FROM t1;
|
||||
SELECT 'q30', floor(STDDEV_SAMP(col_float_unsigned)) FROM t1;
|
||||
SELECT 'q31', floor(STDDEV_SAMP(col_double_signed)) FROM t1;
|
||||
SELECT 'q32', floor(STDDEV_SAMP(col_double_unsigned)) FROM t1;
|
||||
|
||||
SELECT 'q33', floor(VAR_POP(col_signed)) FROM t1;
|
||||
SELECT 'q34', floor(VAR_POP(col_unsigned)) FROM t1;
|
||||
SELECT 'q35', floor(VAR_POP(col_small_signed)) FROM t1;
|
||||
SELECT 'q36', floor(VAR_POP(col_small_unsigned)) FROM t1;
|
||||
SELECT 'q37', floor(VAR_POP(col_med_signed)) FROM t1;
|
||||
SELECT 'q38', floor(VAR_POP(col_med_unsigned)) FROM t1;
|
||||
SELECT 'q39', floor(VAR_POP(col_int_signed)) FROM t1;
|
||||
SELECT 'q40', floor(VAR_POP(col_int_unsigned)) FROM t1;
|
||||
SELECT 'q41', floor(VAR_POP(col_big_signed)) FROM t1;
|
||||
SELECT 'q42', floor(VAR_POP(col_big_unsigned)) FROM t1;
|
||||
SELECT 'q43', floor(VAR_POP(col_dec_signed)) FROM t1;
|
||||
SELECT 'q45', floor(VAR_POP(col_float_signed)) FROM t1;
|
||||
SELECT 'q46', floor(VAR_POP(col_float_unsigned)) FROM t1;
|
||||
SELECT 'q47', floor(VAR_POP(col_double_signed)) FROM t1;
|
||||
SELECT 'q48', floor(VAR_POP(col_double_unsigned)) FROM t1;
|
||||
|
||||
|
||||
SELECT 'q49', floor(VAR_SAMP(col_signed)) FROM t1;
|
||||
SELECT 'q50', floor(VAR_SAMP(col_unsigned)) FROM t1;
|
||||
SELECT 'q51', floor(VAR_SAMP(col_small_signed)) FROM t1;
|
||||
SELECT 'q52', floor(VAR_SAMP(col_small_unsigned)) FROM t1;
|
||||
SELECT 'q53', floor(VAR_SAMP(col_med_signed)) FROM t1;
|
||||
SELECT 'q54', floor(VAR_SAMP(col_med_unsigned)) FROM t1;
|
||||
SELECT 'q55', floor(VAR_SAMP(col_int_signed)) FROM t1;
|
||||
SELECT 'q56', floor(VAR_SAMP(col_int_unsigned)) FROM t1;
|
||||
SELECT 'q57', floor(VAR_SAMP(col_big_signed)) FROM t1;
|
||||
SELECT 'q58', floor(VAR_SAMP(col_big_unsigned)) FROM t1;
|
||||
SELECT 'q59', floor(VAR_SAMP(col_dec_signed)) FROM t1;
|
||||
SELECT 'q61', floor(VAR_SAMP(col_float_signed)) FROM t1;
|
||||
SELECT 'q62', floor(VAR_SAMP(col_float_unsigned)) FROM t1;
|
||||
SELECT 'q63', floor(VAR_SAMP(col_double_signed)) FROM t1;
|
||||
SELECT 'q64', floor(VAR_SAMP(col_double_unsigned)) FROM t1;
|
||||
|
||||
|
||||
|
||||
# Clean UP
|
||||
DROP DATABASE std_test_db;
|
@ -0,0 +1,117 @@
|
||||
-- source ../include/have_columnstore.inc
|
||||
|
||||
--disable_warnings
|
||||
DROP DATABASE IF EXISTS std_test_db;
|
||||
--enable_warnings
|
||||
|
||||
CREATE DATABASE std_test_db;
|
||||
USE std_test_db;
|
||||
|
||||
create table t1 (
|
||||
col_signed tinyint,
|
||||
col_unsigned tinyint unsigned
|
||||
)engine=columnstore;
|
||||
|
||||
--replace_result $MTR_SUITE_DIR MTR_SUITE_DIR
|
||||
--eval LOAD DATA LOCAL infile '$MTR_SUITE_DIR/../std_data/tinyint_range.tbl' INTO TABLE t1 FIELDS TERMINATED BY '|';
|
||||
|
||||
ALTER TABLE t1 ADD COLUMN col_small_signed SMALLINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_small_unsigned SMALLINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_signed MEDIUMINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_med_unsigned MEDIUMINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_signed INT;
|
||||
ALTER TABLE t1 ADD COLUMN col_int_unsigned INT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_signed BIGINT;
|
||||
ALTER TABLE t1 ADD COLUMN col_big_unsigned BIGINT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_signed DECIMAL(38,0);
|
||||
ALTER TABLE t1 ADD COLUMN col_dec_unsigned DECIMAL(38,0) UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_signed FLOAT;
|
||||
ALTER TABLE t1 ADD COLUMN col_float_unsigned FLOAT UNSIGNED;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_signed DOUBLE;
|
||||
ALTER TABLE t1 ADD COLUMN col_double_unsigned DOUBLE UNSIGNED;
|
||||
|
||||
UPDATE t1 SET col_small_signed=col_signed + sign(col_signed) * 32000;
|
||||
UPDATE t1 SET col_small_unsigned=col_unsigned + 65000;
|
||||
UPDATE t1 SET col_med_signed=col_signed + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_med_unsigned=col_unsigned + 16776000;
|
||||
UPDATE t1 SET col_int_signed=col_signed + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_int_unsigned=col_unsigned + 4294000000;
|
||||
UPDATE t1 SET col_big_signed=col_signed + sign(col_signed) * 9223372036854775000;
|
||||
UPDATE t1 SET col_big_unsigned=col_unsigned + 18446744073709551000;
|
||||
UPDATE t1 SET col_dec_signed=col_signed + sign(col_signed) * 800000000000000000000000000000000001;
|
||||
UPDATE t1 SET col_dec_unsigned=col_unsigned + 800000000000000000000000000000000003;
|
||||
UPDATE t1 SET col_float_signed=col_signed + 0.637 + sign(col_signed) * 8388000;
|
||||
UPDATE t1 SET col_float_unsigned=col_unsigned + 0.637 + 16776000;
|
||||
UPDATE t1 SET col_double_signed=col_signed + 0.637 + sign(col_signed) * 2147483000;
|
||||
UPDATE t1 SET col_double_unsigned=col_unsigned + 0.637 + 4294000000;
|
||||
|
||||
|
||||
SELECT 'q1', floor(STD(col_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q2', floor(STD(col_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q3', floor(STD(col_small_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q4', floor(STD(col_small_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q5', floor(STD(col_med_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q6', floor(STD(col_med_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q7', floor(STD(col_int_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q8', floor(STD(col_int_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q9', floor(STD(col_big_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q10', floor(STD(col_big_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q11', floor(STD(col_dec_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q13', floor(STD(col_float_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q14', floor(STD(col_float_unsigned) OVER ()) AS std FROM t1;
|
||||
SELECT 'q15', floor(STD(col_double_signed) OVER ()) AS std FROM t1;
|
||||
SELECT 'q16', floor(STD(col_double_unsigned) OVER ()) AS std FROM t1;
|
||||
|
||||
|
||||
SELECT 'q17', floor(STDDEV_SAMP(col_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q18', floor(STDDEV_SAMP(col_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q19', floor(STDDEV_SAMP(col_small_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q20', floor(STDDEV_SAMP(col_small_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q21', floor(STDDEV_SAMP(col_med_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q22', floor(STDDEV_SAMP(col_med_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q23', floor(STDDEV_SAMP(col_int_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q24', floor(STDDEV_SAMP(col_int_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q25', floor(STDDEV_SAMP(col_big_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q26', floor(STDDEV_SAMP(col_big_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q27', floor(STDDEV_SAMP(col_dec_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q29', floor(STDDEV_SAMP(col_float_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q30', floor(STDDEV_SAMP(col_float_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q31', floor(STDDEV_SAMP(col_double_signed) OVER ()) AS std_samp FROM t1;
|
||||
SELECT 'q32', floor(STDDEV_SAMP(col_double_unsigned) OVER ()) AS std_samp FROM t1;
|
||||
|
||||
SELECT 'q33', floor(VAR_POP(col_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q34', floor(VAR_POP(col_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q35', floor(VAR_POP(col_small_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q36', floor(VAR_POP(col_small_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q37', floor(VAR_POP(col_med_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q38', floor(VAR_POP(col_med_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q39', floor(VAR_POP(col_int_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q40', floor(VAR_POP(col_int_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q41', floor(VAR_POP(col_big_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q42', floor(VAR_POP(col_big_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q43', floor(VAR_POP(col_dec_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q45', floor(VAR_POP(col_float_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q46', floor(VAR_POP(col_float_unsigned) OVER()) AS var FROM t1;
|
||||
SELECT 'q47', floor(VAR_POP(col_double_signed) OVER()) AS var FROM t1;
|
||||
SELECT 'q48', floor(VAR_POP(col_double_unsigned) OVER()) AS var FROM t1;
|
||||
|
||||
|
||||
SELECT 'q49', floor(VAR_SAMP(col_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q50', floor(VAR_SAMP(col_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q51', floor(VAR_SAMP(col_small_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q52', floor(VAR_SAMP(col_small_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q53', floor(VAR_SAMP(col_med_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q54', floor(VAR_SAMP(col_med_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q55', floor(VAR_SAMP(col_int_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q56', floor(VAR_SAMP(col_int_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q57', floor(VAR_SAMP(col_big_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q58', floor(VAR_SAMP(col_big_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q59', floor(VAR_SAMP(col_dec_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q61', floor(VAR_SAMP(col_float_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q62', floor(VAR_SAMP(col_float_unsigned) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q63', floor(VAR_SAMP(col_double_signed) OVER()) AS var_samp FROM t1;
|
||||
SELECT 'q64', floor(VAR_SAMP(col_double_unsigned) OVER()) AS var_samp FROM t1;
|
||||
|
||||
|
||||
# Clean UP
|
||||
DROP DATABASE std_test_db;
|
254
mysql-test/columnstore/std_data/tinyint_range.tbl
Normal file
254
mysql-test/columnstore/std_data/tinyint_range.tbl
Normal file
@ -0,0 +1,254 @@
|
||||
-126|0
|
||||
-125|1
|
||||
-124|2
|
||||
-123|3
|
||||
-122|4
|
||||
-121|5
|
||||
-120|6
|
||||
-119|7
|
||||
-118|8
|
||||
-117|9
|
||||
-116|10
|
||||
-115|11
|
||||
-114|12
|
||||
-113|13
|
||||
-112|14
|
||||
-111|15
|
||||
-110|16
|
||||
-109|17
|
||||
-108|18
|
||||
-107|19
|
||||
-106|20
|
||||
-105|21
|
||||
-104|22
|
||||
-103|23
|
||||
-102|24
|
||||
-101|25
|
||||
-100|26
|
||||
-99|27
|
||||
-98|28
|
||||
-97|29
|
||||
-96|30
|
||||
-95|31
|
||||
-94|32
|
||||
-93|33
|
||||
-92|34
|
||||
-91|35
|
||||
-90|36
|
||||
-89|37
|
||||
-88|38
|
||||
-87|39
|
||||
-86|40
|
||||
-85|41
|
||||
-84|42
|
||||
-83|43
|
||||
-82|44
|
||||
-81|45
|
||||
-80|46
|
||||
-79|47
|
||||
-78|48
|
||||
-77|49
|
||||
-76|50
|
||||
-75|51
|
||||
-74|52
|
||||
-73|53
|
||||
-72|54
|
||||
-71|55
|
||||
-70|56
|
||||
-69|57
|
||||
-68|58
|
||||
-67|59
|
||||
-66|60
|
||||
-65|61
|
||||
-64|62
|
||||
-63|63
|
||||
-62|64
|
||||
-61|65
|
||||
-60|66
|
||||
-59|67
|
||||
-58|68
|
||||
-57|69
|
||||
-56|70
|
||||
-55|71
|
||||
-54|72
|
||||
-53|73
|
||||
-52|74
|
||||
-51|75
|
||||
-50|76
|
||||
-49|77
|
||||
-48|78
|
||||
-47|79
|
||||
-46|80
|
||||
-45|81
|
||||
-44|82
|
||||
-43|83
|
||||
-42|84
|
||||
-41|85
|
||||
-40|86
|
||||
-39|87
|
||||
-38|88
|
||||
-37|89
|
||||
-36|90
|
||||
-35|91
|
||||
-34|92
|
||||
-33|93
|
||||
-32|94
|
||||
-31|95
|
||||
-30|96
|
||||
-29|97
|
||||
-28|98
|
||||
-27|99
|
||||
-26|100
|
||||
-25|101
|
||||
-24|102
|
||||
-23|103
|
||||
-22|104
|
||||
-21|105
|
||||
-20|106
|
||||
-19|107
|
||||
-18|108
|
||||
-17|109
|
||||
-16|110
|
||||
-15|111
|
||||
-14|112
|
||||
-13|113
|
||||
-12|114
|
||||
-11|115
|
||||
-10|116
|
||||
-9|117
|
||||
-8|118
|
||||
-7|119
|
||||
-6|120
|
||||
-5|121
|
||||
-4|122
|
||||
-3|123
|
||||
-2|124
|
||||
-1|125
|
||||
0|126
|
||||
1|127
|
||||
2|128
|
||||
3|129
|
||||
4|130
|
||||
5|131
|
||||
6|132
|
||||
7|133
|
||||
8|134
|
||||
9|135
|
||||
10|136
|
||||
11|137
|
||||
12|138
|
||||
13|139
|
||||
14|140
|
||||
15|141
|
||||
16|142
|
||||
17|143
|
||||
18|144
|
||||
19|145
|
||||
20|146
|
||||
21|147
|
||||
22|148
|
||||
23|149
|
||||
24|150
|
||||
25|151
|
||||
26|152
|
||||
27|153
|
||||
28|154
|
||||
29|155
|
||||
30|156
|
||||
31|157
|
||||
32|158
|
||||
33|159
|
||||
34|160
|
||||
35|161
|
||||
36|162
|
||||
37|163
|
||||
38|164
|
||||
39|165
|
||||
40|166
|
||||
41|167
|
||||
42|168
|
||||
43|169
|
||||
44|170
|
||||
45|171
|
||||
46|172
|
||||
47|173
|
||||
48|174
|
||||
49|175
|
||||
50|176
|
||||
51|177
|
||||
52|178
|
||||
53|179
|
||||
54|180
|
||||
55|181
|
||||
56|182
|
||||
57|183
|
||||
58|184
|
||||
59|185
|
||||
60|186
|
||||
61|187
|
||||
62|188
|
||||
63|189
|
||||
64|190
|
||||
65|191
|
||||
66|192
|
||||
67|193
|
||||
68|194
|
||||
69|195
|
||||
70|196
|
||||
71|197
|
||||
72|198
|
||||
73|199
|
||||
74|200
|
||||
75|201
|
||||
76|202
|
||||
77|203
|
||||
78|204
|
||||
79|205
|
||||
80|206
|
||||
81|207
|
||||
82|208
|
||||
83|209
|
||||
84|210
|
||||
85|211
|
||||
86|212
|
||||
87|213
|
||||
88|214
|
||||
89|215
|
||||
90|216
|
||||
91|217
|
||||
92|218
|
||||
93|219
|
||||
94|220
|
||||
95|221
|
||||
96|222
|
||||
97|223
|
||||
98|224
|
||||
99|225
|
||||
100|226
|
||||
101|227
|
||||
102|228
|
||||
103|229
|
||||
104|230
|
||||
105|231
|
||||
106|232
|
||||
107|233
|
||||
108|234
|
||||
109|235
|
||||
110|236
|
||||
111|237
|
||||
112|238
|
||||
113|239
|
||||
114|240
|
||||
115|241
|
||||
116|242
|
||||
117|243
|
||||
118|244
|
||||
119|245
|
||||
120|246
|
||||
121|247
|
||||
122|248
|
||||
123|249
|
||||
124|250
|
||||
125|251
|
||||
126|252
|
||||
127|253
|
@ -12,11 +12,18 @@ running_systemd() {
|
||||
fi
|
||||
}
|
||||
|
||||
# This function recursively(up to PID 1) searches for
|
||||
# env_var_name in the environment variables list
|
||||
find_env_var() {
|
||||
env_var_name=$1
|
||||
pid=$$
|
||||
while [ -z "$ENV_VAR" -a "$pid" != 1 ]; do
|
||||
ENV_VAR=''
|
||||
while [ -z "$ENV_VAR" -a "$pid" != 1 ]; do
|
||||
ppid=$(ps -oppid -p$pid|tail -1|awk '{print $1}')
|
||||
# This condition is true in containers
|
||||
if [ "$ppid" == 0 ]; then
|
||||
break;
|
||||
fi
|
||||
env=$(strings /proc/$ppid/environ)
|
||||
ENV_VAR=$(echo "$env"|awk -F= "\$1 == \"$env_var_name\" { print \$2; }")
|
||||
pid=$ppid
|
||||
|
@ -16,4 +16,5 @@ LimitNPROC=65536
|
||||
ExecStart=@ENGINE_BINDIR@/DMLProc
|
||||
|
||||
Restart=on-failure
|
||||
TimeoutStopSec=2
|
||||
TimeoutStartSec=900
|
||||
TimeoutStopSec=900
|
||||
|
@ -98,7 +98,7 @@ if __name__ == '__main__':
|
||||
if use_systemd is True:
|
||||
cmd = 'systemctl start mcs-storagemanager'
|
||||
retcode = subprocess.call(cmd, shell=True)
|
||||
if retcode < 0:
|
||||
if retcode != 0:
|
||||
print('Failed to start storagemanager. \
|
||||
{} exits with {}.'.format(cmd, retcode))
|
||||
sys.exit(1)
|
||||
@ -219,7 +219,7 @@ brm_saves_current.decode("utf-8").replace("BRM_saves", ""), USER)
|
||||
brm_saves_current.decode("utf-8").replace("BRM_saves", ""))
|
||||
try:
|
||||
retcode = subprocess.call(cmd, shell=True)
|
||||
if retcode < 0:
|
||||
if retcode != 0:
|
||||
print('{} exits with {}.'.format(cmd, retcode))
|
||||
sys.exit(1)
|
||||
# systemd services by default works using mysql privileges.
|
||||
@ -230,3 +230,9 @@ brm_saves_current.decode("utf-8").replace("BRM_saves", ""))
|
||||
shutil.chown(shm_file, USER, GROUP)
|
||||
except OSError as e:
|
||||
sys.exit(1)
|
||||
else:
|
||||
if s3_enabled:
|
||||
print('brm_saves_currenty returned empty string from read_from_sm_with_retry')
|
||||
else:
|
||||
print('brm_saves_currenty returned empty string from read_from_disk')
|
||||
sys.exit(1)
|
||||
|
@ -15,6 +15,7 @@ ExecStopPost=@ENGINE_BINDIR@/mcs-savebrm.py
|
||||
ExecStopPost=/usr/bin/env bash -c "clearShm > /dev/null 2>&1"
|
||||
|
||||
Restart=on-failure
|
||||
TimeoutStopSec=120
|
||||
TimeoutStopSec=750
|
||||
|
||||
EnvironmentFile=-/etc/columnstore/systemd.env
|
||||
TimeoutStopSec=900
|
@ -140,6 +140,7 @@ BatchPrimitiveProcessor::BatchPrimitiveProcessor()
|
||||
, ptMask(0)
|
||||
, firstInstance(false)
|
||||
, valuesLBID(0)
|
||||
, weight_(0)
|
||||
{
|
||||
pp.setLogicalBlockMode(true);
|
||||
pp.setBlockPtr((int*)blockData);
|
||||
@ -193,6 +194,7 @@ BatchPrimitiveProcessor::BatchPrimitiveProcessor(ByteStream& b, double prefetch,
|
||||
// ptMask(processorThreads - 1),
|
||||
, firstInstance(true)
|
||||
, valuesLBID(0)
|
||||
, weight_(0)
|
||||
{
|
||||
// promote processorThreads to next power of 2. also need to change the name to bucketCount or similar
|
||||
processorThreads = nextPowOf2(processorThreads);
|
||||
@ -542,6 +544,7 @@ void BatchPrimitiveProcessor::resetBPP(ByteStream& bs, const SP_UM_MUTEX& w, con
|
||||
|
||||
// skip the header, sessionID, stepID, uniqueID, and priority
|
||||
bs.advance(sizeof(ISMPacketHeader) + 16);
|
||||
bs >> weight_;
|
||||
bs >> dbRoot;
|
||||
bs >> count;
|
||||
bs >> ridCount;
|
||||
|
@ -137,6 +137,11 @@ class BatchPrimitiveProcessor
|
||||
fBusy = b;
|
||||
}
|
||||
|
||||
size_t getWeight() const
|
||||
{
|
||||
return weight_;
|
||||
}
|
||||
|
||||
uint16_t FilterCount() const
|
||||
{
|
||||
return filterCount;
|
||||
@ -434,6 +439,9 @@ class BatchPrimitiveProcessor
|
||||
uint ptMask;
|
||||
bool firstInstance;
|
||||
uint64_t valuesLBID;
|
||||
uint32_t weight_;
|
||||
|
||||
static const uint64_t maxResultCount = 1048576; // 2^20
|
||||
|
||||
static const uint64_t maxResultCount = 1048576; // 2^20
|
||||
|
||||
|
@ -47,7 +47,7 @@
|
||||
|
||||
namespace primitiveprocessor
|
||||
{
|
||||
class BPPSeeder : public threadpool::PriorityThreadPool::Functor
|
||||
class BPPSeeder : public threadpool::FairThreadPool::Functor
|
||||
{
|
||||
public:
|
||||
BPPSeeder(const messageqcpp::SBS&, const SP_UM_MUTEX& wLock, const SP_UM_IOSOCK& ios, const int pmThreads,
|
||||
@ -71,6 +71,11 @@ class BPPSeeder : public threadpool::PriorityThreadPool::Functor
|
||||
{
|
||||
return _priority;
|
||||
}
|
||||
size_t getWeight() const
|
||||
{
|
||||
assert(bpp);
|
||||
return bpp->getWeight();
|
||||
}
|
||||
|
||||
private:
|
||||
BPPSeeder();
|
||||
|
@ -124,7 +124,7 @@ oam::OamCache* oamCache = oam::OamCache::makeOamCache();
|
||||
// FIXME: there is an anon ns burried later in between 2 named namespaces...
|
||||
namespace primitiveprocessor
|
||||
{
|
||||
boost::shared_ptr<threadpool::PriorityThreadPool> OOBPool;
|
||||
boost::shared_ptr<threadpool::FairThreadPool> OOBPool;
|
||||
|
||||
BlockRequestProcessor** BRPp;
|
||||
#ifndef _MSC_VER
|
||||
@ -1050,7 +1050,7 @@ using namespace primitiveprocessor;
|
||||
/** @brief The job type to process a dictionary scan (pDictionaryScan class on the UM)
|
||||
* TODO: Move this & the impl into different files
|
||||
*/
|
||||
class DictScanJob : public threadpool::PriorityThreadPool::Functor
|
||||
class DictScanJob : public threadpool::FairThreadPool::Functor
|
||||
{
|
||||
public:
|
||||
DictScanJob(SP_UM_IOSOCK ios, SBS bs, SP_UM_MUTEX writeLock);
|
||||
@ -1242,7 +1242,7 @@ struct BPPHandler
|
||||
scoped.unlock();
|
||||
}
|
||||
|
||||
struct BPPHandlerFunctor : public PriorityThreadPool::Functor
|
||||
struct BPPHandlerFunctor : public FairThreadPool::Functor
|
||||
{
|
||||
BPPHandlerFunctor(boost::shared_ptr<BPPHandler> r, SBS b) : bs(b)
|
||||
{
|
||||
@ -1710,7 +1710,7 @@ return 0;
|
||||
PrimitiveServer* fPrimitiveServerPtr;
|
||||
};
|
||||
|
||||
class DictionaryOp : public PriorityThreadPool::Functor
|
||||
class DictionaryOp : public FairThreadPool::Functor
|
||||
{
|
||||
public:
|
||||
DictionaryOp(SBS cmd) : bs(cmd)
|
||||
@ -1947,7 +1947,7 @@ struct ReadThread
|
||||
void operator()()
|
||||
{
|
||||
utils::setThreadName("PPReadThread");
|
||||
threadpool::PriorityThreadPool* procPoolPtr = fPrimitiveServerPtr->getProcessorThreadPool();
|
||||
threadpool::FairThreadPool* procPoolPtr = fPrimitiveServerPtr->getProcessorThreadPool();
|
||||
SBS bs;
|
||||
UmSocketSelector* pUmSocketSelector = UmSocketSelector::instance();
|
||||
|
||||
@ -2043,35 +2043,69 @@ struct ReadThread
|
||||
switch (ismHdr->Command)
|
||||
{
|
||||
case DICT_CREATE_EQUALITY_FILTER:
|
||||
{
|
||||
PriorityThreadPool::Job job;
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
job.functor = boost::shared_ptr<PriorityThreadPool::Functor>(new CreateEqualityFilter(bs));
|
||||
OOBPool->addJob(job);
|
||||
break;
|
||||
}
|
||||
|
||||
case DICT_DESTROY_EQUALITY_FILTER:
|
||||
case BATCH_PRIMITIVE_CREATE:
|
||||
case BATCH_PRIMITIVE_ADD_JOINER:
|
||||
case BATCH_PRIMITIVE_END_JOINER:
|
||||
case BATCH_PRIMITIVE_DESTROY:
|
||||
case BATCH_PRIMITIVE_ABORT:
|
||||
{
|
||||
PriorityThreadPool::Job job;
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
job.functor = boost::shared_ptr<PriorityThreadPool::Functor>(new DestroyEqualityFilter(bs));
|
||||
const uint32_t txnId = *((uint32_t*)&buf[pos + 2]);
|
||||
const uint32_t stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
const uint32_t uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
const uint32_t weight = 1;
|
||||
const uint32_t priority = 0;
|
||||
uint32_t id = 0;
|
||||
boost::shared_ptr<FairThreadPool::Functor> functor;
|
||||
if (ismHdr->Command == DICT_CREATE_EQUALITY_FILTER)
|
||||
{
|
||||
functor.reset(new CreateEqualityFilter(bs));
|
||||
}
|
||||
else if (ismHdr->Command == DICT_DESTROY_EQUALITY_FILTER)
|
||||
{
|
||||
functor.reset(new DestroyEqualityFilter(bs));
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_CREATE)
|
||||
{
|
||||
functor.reset(new BPPHandler::Create(fBPPHandler, bs));
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_ADD_JOINER)
|
||||
{
|
||||
functor.reset(new BPPHandler::AddJoiner(fBPPHandler, bs));
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_END_JOINER)
|
||||
{
|
||||
id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
functor.reset(new BPPHandler::LastJoiner(fBPPHandler, bs));
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_DESTROY)
|
||||
{
|
||||
functor.reset(new BPPHandler::Destroy(fBPPHandler, bs));
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_ABORT)
|
||||
{
|
||||
id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
functor.reset(new BPPHandler::Abort(fBPPHandler, bs));
|
||||
}
|
||||
FairThreadPool::Job job(uniqueID, stepID, txnId, functor, outIos, weight, priority, id);
|
||||
OOBPool->addJob(job);
|
||||
break;
|
||||
}
|
||||
|
||||
case DICT_TOKEN_BY_SCAN_COMPARE:
|
||||
case BATCH_PRIMITIVE_RUN:
|
||||
{
|
||||
idbassert(bs->length() >= sizeof(TokenByScanRequestHeader));
|
||||
TokenByScanRequestHeader* hdr = (TokenByScanRequestHeader*)ismHdr;
|
||||
TokenByScanRequestHeader* hdr = nullptr;
|
||||
boost::shared_ptr<FairThreadPool::Functor> functor;
|
||||
uint32_t id = 0;
|
||||
uint32_t weight = 0;
|
||||
uint32_t priority = 0;
|
||||
uint32_t txnId = 0;
|
||||
uint32_t stepID = 0;
|
||||
uint32_t uniqueID = 0;
|
||||
bool isSyscat = false;
|
||||
|
||||
if (bRotateDest)
|
||||
{
|
||||
@ -2089,23 +2123,41 @@ struct ReadThread
|
||||
}
|
||||
}
|
||||
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor = boost::shared_ptr<DictScanJob>(new DictScanJob(outIos, bs, writeLock));
|
||||
job.id = hdr->Hdr.UniqueID;
|
||||
job.weight = LOGICAL_BLOCK_RIDS;
|
||||
job.priority = hdr->Hdr.Priority;
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
|
||||
if (hdr->flags & IS_SYSCAT)
|
||||
if (ismHdr->Command == DICT_TOKEN_BY_SCAN_COMPARE)
|
||||
{
|
||||
idbassert(bs->length() >= sizeof(TokenByScanRequestHeader));
|
||||
hdr = (TokenByScanRequestHeader*)ismHdr;
|
||||
functor.reset(new DictScanJob(outIos, bs, writeLock));
|
||||
id = hdr->Hdr.UniqueID;
|
||||
weight = LOGICAL_BLOCK_RIDS;
|
||||
priority = hdr->Hdr.Priority;
|
||||
const uint8_t* buf = bs->buf();
|
||||
const uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
txnId = *((uint32_t*)&buf[pos + 2]);
|
||||
stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
isSyscat = hdr->flags & IS_SYSCAT;
|
||||
}
|
||||
else if (ismHdr->Command == BATCH_PRIMITIVE_RUN)
|
||||
{
|
||||
functor.reset(new BPPSeeder(bs, writeLock, outIos,
|
||||
fPrimitiveServerPtr->ProcessorThreads(),
|
||||
fPrimitiveServerPtr->PTTrace()));
|
||||
BPPSeeder* bpps = dynamic_cast<BPPSeeder*>(functor.get());
|
||||
id = bpps->getID();
|
||||
priority = bpps->priority();
|
||||
const uint8_t* buf = bs->buf();
|
||||
const uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
txnId = *((uint32_t*)&buf[pos + 2]);
|
||||
stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
weight = ismHdr->Size + *((uint32_t*)&buf[pos + 18]);
|
||||
isSyscat = bpps->isSysCat();
|
||||
}
|
||||
FairThreadPool::Job job(uniqueID, stepID, txnId, functor, outIos, weight, priority, id);
|
||||
|
||||
if (isSyscat)
|
||||
{
|
||||
// boost::thread t(DictScanJob(outIos, bs, writeLock));
|
||||
// using already-existing threads may cut latency
|
||||
// if it's changed back to running in an independent thread
|
||||
// change the issyscat() checks in BPPSeeder as well
|
||||
OOBPool->addJob(job);
|
||||
}
|
||||
else
|
||||
@ -2116,146 +2168,11 @@ struct ReadThread
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_RUN:
|
||||
{
|
||||
if (bRotateDest)
|
||||
{
|
||||
if (!pUmSocketSelector->nextIOSocket(fIos, outIos, writeLock))
|
||||
{
|
||||
// If we ever fall into this part of the
|
||||
// code we have a "bug" of some sort.
|
||||
// See handleUmSockSelErr() for more info.
|
||||
// We reset ios and mutex to defaults.
|
||||
handleUmSockSelErr(string("BPR cmd"));
|
||||
outIos = outIosDefault;
|
||||
writeLock = writeLockDefault;
|
||||
pUmSocketSelector->delConnection(fIos);
|
||||
bRotateDest = false;
|
||||
}
|
||||
}
|
||||
|
||||
/* Decide whether this is a syscat call and run
|
||||
right away instead of queueing */
|
||||
boost::shared_ptr<BPPSeeder> bpps(new BPPSeeder(bs, writeLock, outIos,
|
||||
fPrimitiveServerPtr->ProcessorThreads(),
|
||||
fPrimitiveServerPtr->PTTrace()));
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor = bpps;
|
||||
job.id = bpps->getID();
|
||||
job.weight = ismHdr->Size;
|
||||
job.priority = bpps->priority();
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
|
||||
if (bpps->isSysCat())
|
||||
{
|
||||
// boost::thread t(*bpps);
|
||||
// using already-existing threads may cut latency
|
||||
// if it's changed back to running in an independent thread
|
||||
// change the issyscat() checks in BPPSeeder as well
|
||||
OOBPool->addJob(job);
|
||||
}
|
||||
else
|
||||
{
|
||||
procPoolPtr->addJob(job);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_CREATE:
|
||||
{
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor =
|
||||
boost::shared_ptr<PriorityThreadPool::Functor>(new BPPHandler::Create(fBPPHandler, bs));
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
OOBPool->addJob(job);
|
||||
// fBPPHandler->createBPP(*bs);
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_ADD_JOINER:
|
||||
{
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor =
|
||||
boost::shared_ptr<PriorityThreadPool::Functor>(new BPPHandler::AddJoiner(fBPPHandler, bs));
|
||||
job.id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
OOBPool->addJob(job);
|
||||
// fBPPHandler->addJoinerToBPP(*bs);
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_END_JOINER:
|
||||
{
|
||||
// lastJoinerMsg can block; must do this in a different thread
|
||||
// OOBPool->invoke(BPPHandler::LastJoiner(fBPPHandler, bs)); // needs a threadpool that can
|
||||
// resched boost::thread tmp(BPPHandler::LastJoiner(fBPPHandler, bs));
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor =
|
||||
boost::shared_ptr<PriorityThreadPool::Functor>(new BPPHandler::LastJoiner(fBPPHandler, bs));
|
||||
job.id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
OOBPool->addJob(job);
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_DESTROY:
|
||||
{
|
||||
// OOBPool->invoke(BPPHandler::Destroy(fBPPHandler, bs)); // needs a threadpool that can
|
||||
// resched boost::thread tmp(BPPHandler::Destroy(fBPPHandler, bs));
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor =
|
||||
boost::shared_ptr<PriorityThreadPool::Functor>(new BPPHandler::Destroy(fBPPHandler, bs));
|
||||
job.id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
OOBPool->addJob(job);
|
||||
// fBPPHandler->destroyBPP(*bs);
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_ACK:
|
||||
{
|
||||
fBPPHandler->doAck(*bs);
|
||||
break;
|
||||
}
|
||||
|
||||
case BATCH_PRIMITIVE_ABORT:
|
||||
{
|
||||
// OBPool->invoke(BPPHandler::Abort(fBPPHandler, bs));
|
||||
// fBPPHandler->doAbort(*bs);
|
||||
PriorityThreadPool::Job job;
|
||||
job.functor =
|
||||
boost::shared_ptr<PriorityThreadPool::Functor>(new BPPHandler::Abort(fBPPHandler, bs));
|
||||
job.id = fBPPHandler->getUniqueID(bs, ismHdr->Command);
|
||||
const uint8_t* buf = bs->buf();
|
||||
uint32_t pos = sizeof(ISMPacketHeader) - 2;
|
||||
job.stepID = *((uint32_t*)&buf[pos + 6]);
|
||||
job.uniqueID = *((uint32_t*)&buf[pos + 10]);
|
||||
job.sock = outIos;
|
||||
OOBPool->addJob(job);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
std::ostringstream os;
|
||||
@ -2405,12 +2322,12 @@ PrimitiveServer::PrimitiveServer(int serverThreads, int serverQueueSize, int pro
|
||||
fServerpool.setQueueSize(fServerQueueSize);
|
||||
fServerpool.setName("PrimitiveServer");
|
||||
|
||||
fProcessorPool = new threadpool::PriorityThreadPool(fProcessorWeight, highPriorityThreads,
|
||||
fProcessorPool = new threadpool::FairThreadPool(fProcessorWeight, highPriorityThreads,
|
||||
medPriorityThreads, lowPriorityThreads, 0);
|
||||
|
||||
// We're not using either the priority or the job-clustering features, just need a threadpool
|
||||
// that can reschedule jobs, and an unlimited non-blocking queue
|
||||
OOBPool.reset(new threadpool::PriorityThreadPool(1, 5, 0, 0, 1));
|
||||
OOBPool.reset(new threadpool::FairThreadPool(1, 5, 0, 0, 1));
|
||||
|
||||
asyncCounter = 0;
|
||||
|
||||
|
@ -37,6 +37,7 @@
|
||||
|
||||
#include "threadpool.h"
|
||||
#include "../../utils/threadpool/prioritythreadpool.h"
|
||||
#include "fair_threadpool.h"
|
||||
#include "messagequeue.h"
|
||||
#include "blockrequestprocessor.h"
|
||||
#include "batchprimitiveprocessor.h"
|
||||
@ -48,7 +49,7 @@ extern oam::OamCache* oamCache;
|
||||
|
||||
namespace primitiveprocessor
|
||||
{
|
||||
extern boost::shared_ptr<threadpool::PriorityThreadPool> OOBPool;
|
||||
extern boost::shared_ptr<threadpool::FairThreadPool> OOBPool;
|
||||
extern dbbc::BlockRequestProcessor** BRPp;
|
||||
extern BRM::DBRM* brm;
|
||||
extern boost::mutex bppLock;
|
||||
@ -130,7 +131,7 @@ class PrimitiveServer
|
||||
|
||||
/** @brief get a pointer the shared processor thread pool
|
||||
*/
|
||||
inline threadpool::PriorityThreadPool* getProcessorThreadPool() const
|
||||
inline threadpool::FairThreadPool* getProcessorThreadPool() const
|
||||
{
|
||||
return fProcessorPool;
|
||||
}
|
||||
@ -167,7 +168,7 @@ class PrimitiveServer
|
||||
/** @brief the thread pool used to process
|
||||
* primitive commands
|
||||
*/
|
||||
threadpool::PriorityThreadPool* fProcessorPool;
|
||||
threadpool::FairThreadPool* fProcessorPool;
|
||||
|
||||
int fServerThreads;
|
||||
int fServerQueueSize;
|
||||
|
@ -56,7 +56,18 @@ CloudStorage* CloudStorage::get()
|
||||
if (inst)
|
||||
return inst;
|
||||
if (type == "s3")
|
||||
inst = new S3Storage();
|
||||
{
|
||||
try
|
||||
{
|
||||
inst = new S3Storage();
|
||||
}
|
||||
catch (exception& e)
|
||||
{
|
||||
cout << "S3 Storage Manager Configuration Error:" << endl;
|
||||
cout << e.what() << endl;
|
||||
throw runtime_error("S3Storage: Failed");
|
||||
}
|
||||
}
|
||||
else if (type == "local" || type == "localstorage")
|
||||
inst = new LocalStorage();
|
||||
else
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/property_tree/ini_parser.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <boost/regex.hpp>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <vector>
|
||||
@ -30,7 +31,6 @@
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <iostream>
|
||||
#include <regex>
|
||||
|
||||
#include "SMLogging.h"
|
||||
|
||||
@ -165,13 +165,13 @@ bool Config::reload()
|
||||
return rtn;
|
||||
}
|
||||
|
||||
string use_envvar(const std::smatch& envvar)
|
||||
string use_envvar(const boost::smatch& envvar)
|
||||
{
|
||||
char* env = getenv(envvar[1].str().c_str());
|
||||
return (env ? env : "");
|
||||
}
|
||||
|
||||
string expand_numbers(const std::smatch& match)
|
||||
string expand_numbers(const boost::smatch& match)
|
||||
{
|
||||
long long num = stol(match[1].str());
|
||||
char suffix = (char)::tolower(match[2].str()[0]);
|
||||
@ -187,20 +187,6 @@ string expand_numbers(const std::smatch& match)
|
||||
return ::to_string(num);
|
||||
}
|
||||
|
||||
std::string regex_replace_with_format(const std::string& input,
|
||||
const std::regex& regex,
|
||||
std::function<std::string(std::smatch const& match)> format)
|
||||
{
|
||||
|
||||
std::ostringstream output;
|
||||
std::sregex_iterator begin(input.begin(), input.end(), regex), end;
|
||||
for(; begin != end; begin++){
|
||||
output << begin->prefix() << format(*begin);
|
||||
}
|
||||
output << input.substr(input.size() - begin->position());
|
||||
return output.str();
|
||||
}
|
||||
|
||||
string Config::getValue(const string& section, const string& key) const
|
||||
{
|
||||
// if we care, move this envvar substition stuff to where the file is loaded
|
||||
@ -216,15 +202,15 @@ string Config::getValue(const string& section, const string& key) const
|
||||
}
|
||||
s.unlock();
|
||||
|
||||
std::regex re("\\$\\{(.+)\\}");
|
||||
boost::regex re("\\$\\{(.+)\\}");
|
||||
|
||||
ret = regex_replace_with_format(ret, re, use_envvar);
|
||||
ret = boost::regex_replace(ret, re, use_envvar);
|
||||
|
||||
// do the numeric substitutions. ex, the suffixes m, k, g
|
||||
// ehhhhh. going to end up turning a string to a number, to a string, and then to a number again
|
||||
// don't like that. OTOH who cares.
|
||||
std::regex num_re("^([[:digit:]]+)([mMkKgG])$", std::regex::extended);
|
||||
ret = regex_replace_with_format(ret, num_re, expand_numbers);
|
||||
boost::regex num_re("^([[:digit:]]+)([mMkKgG])$", boost::regex::extended);
|
||||
ret = boost::regex_replace(ret, num_re, expand_numbers);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -25,6 +25,8 @@
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
#define BOOST_SPIRIT_THREADSAFE
|
||||
#include <boost/property_tree/json_parser.hpp>
|
||||
#include <iostream>
|
||||
#include "checks.h"
|
||||
#include "vlarray.h"
|
||||
@ -1264,10 +1266,9 @@ boost::shared_array<uint8_t> IOCoordinator::mergeJournal(const char* object, con
|
||||
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD, &l_bytesRead);
|
||||
stringstream ss;
|
||||
ss << headertxt.get();
|
||||
|
||||
nlohmann::json header = nlohmann::json::parse(ss);
|
||||
|
||||
assert(header["version"] == 1);
|
||||
boost::property_tree::ptree header;
|
||||
boost::property_tree::json_parser::read_json(ss, header);
|
||||
assert(header.get<int>("version") == 1);
|
||||
|
||||
// start processing the entries
|
||||
while (1)
|
||||
@ -1352,9 +1353,9 @@ int IOCoordinator::mergeJournalInMem(boost::shared_array<uint8_t>& objData, size
|
||||
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD, &l_bytesRead);
|
||||
stringstream ss;
|
||||
ss << headertxt.get();
|
||||
|
||||
nlohmann::json header = nlohmann::json::parse(ss);
|
||||
assert(header["version"] == 1);
|
||||
boost::property_tree::ptree header;
|
||||
boost::property_tree::json_parser::read_json(ss, header);
|
||||
assert(header.get<int>("version") == 1);
|
||||
|
||||
// read the journal file into memory
|
||||
size_t journalBytes = ::lseek(journalFD, 0, SEEK_END) - l_bytesRead;
|
||||
@ -1432,9 +1433,9 @@ int IOCoordinator::mergeJournalInMem_bigJ(boost::shared_array<uint8_t>& objData,
|
||||
boost::shared_array<char> headertxt = seekToEndOfHeader1(journalFD, &l_bytesRead);
|
||||
stringstream ss;
|
||||
ss << headertxt.get();
|
||||
|
||||
nlohmann::json header = nlohmann::json::parse(ss);
|
||||
assert(header["version"] == 1);
|
||||
boost::property_tree::ptree header;
|
||||
boost::property_tree::json_parser::read_json(ss, header);
|
||||
assert(header.get<int>("version") == 1);
|
||||
|
||||
// start processing the entries
|
||||
while (1)
|
||||
|
@ -20,17 +20,20 @@
|
||||
*/
|
||||
#include "MetadataFile.h"
|
||||
#include <boost/filesystem.hpp>
|
||||
#define BOOST_SPIRIT_THREADSAFE
|
||||
#include <boost/property_tree/ptree.hpp>
|
||||
#include <boost/property_tree/json_parser.hpp>
|
||||
#include <boost/foreach.hpp>
|
||||
#include <boost/uuid/uuid.hpp>
|
||||
#include <boost/uuid/uuid_io.hpp>
|
||||
#include <boost/uuid/random_generator.hpp>
|
||||
#include <cstdio>
|
||||
#include <unistd.h>
|
||||
#include <fstream>
|
||||
|
||||
#define max(x, y) (x > y ? x : y)
|
||||
#define min(x, y) (x < y ? x : y)
|
||||
|
||||
using namespace std;
|
||||
namespace bpt = boost::property_tree;
|
||||
namespace bf = boost::filesystem;
|
||||
|
||||
namespace
|
||||
@ -117,13 +120,12 @@ MetadataFile::MetadataFile(const boost::filesystem::path& filename)
|
||||
{
|
||||
if (boost::filesystem::exists(mFilename))
|
||||
{
|
||||
std::ifstream i(mFilename.string());
|
||||
jsontree.reset(new nlohmann::json);
|
||||
i >> *jsontree;
|
||||
jsontree.reset(new bpt::ptree());
|
||||
boost::property_tree::read_json(mFilename.string(), *jsontree);
|
||||
jsonCache.put(mFilename, jsontree);
|
||||
s.unlock();
|
||||
mVersion = 1;
|
||||
mRevision = (*jsontree)["revision"];
|
||||
mRevision = jsontree->get<int>("revision");
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -138,7 +140,7 @@ MetadataFile::MetadataFile(const boost::filesystem::path& filename)
|
||||
{
|
||||
s.unlock();
|
||||
mVersion = 1;
|
||||
mRevision = (*jsontree)["revision"];;
|
||||
mRevision = jsontree->get<int>("revision");
|
||||
}
|
||||
++metadataFilesAccessed;
|
||||
}
|
||||
@ -160,13 +162,12 @@ MetadataFile::MetadataFile(const boost::filesystem::path& filename, no_create_t,
|
||||
if (boost::filesystem::exists(mFilename))
|
||||
{
|
||||
_exists = true;
|
||||
jsontree.reset(new nlohmann::json);
|
||||
std::ifstream i(mFilename.string());
|
||||
i >> *jsontree;
|
||||
jsontree.reset(new bpt::ptree());
|
||||
boost::property_tree::read_json(mFilename.string(), *jsontree);
|
||||
jsonCache.put(mFilename, jsontree);
|
||||
s.unlock();
|
||||
mVersion = 1;
|
||||
mRevision = (*jsontree)["revision"];
|
||||
mRevision = jsontree->get<int>("revision");
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -181,7 +182,7 @@ MetadataFile::MetadataFile(const boost::filesystem::path& filename, no_create_t,
|
||||
s.unlock();
|
||||
_exists = true;
|
||||
mVersion = 1;
|
||||
mRevision = (*jsontree)["revision"];
|
||||
mRevision = jsontree->get<int>("revision");
|
||||
}
|
||||
++metadataFilesAccessed;
|
||||
}
|
||||
@ -192,10 +193,11 @@ MetadataFile::~MetadataFile()
|
||||
|
||||
void MetadataFile::makeEmptyJsonTree()
|
||||
{
|
||||
jsontree.reset(new nlohmann::json);
|
||||
(*jsontree)["version"] = mVersion;
|
||||
(*jsontree)["revision"] = mRevision;
|
||||
(*jsontree)["objects"] = nlohmann::json::array();
|
||||
jsontree.reset(new bpt::ptree());
|
||||
boost::property_tree::ptree objs;
|
||||
jsontree->put("version", mVersion);
|
||||
jsontree->put("revision", mRevision);
|
||||
jsontree->add_child("objects", objs);
|
||||
}
|
||||
|
||||
void MetadataFile::printKPIs()
|
||||
@ -217,11 +219,11 @@ size_t MetadataFile::getLength() const
|
||||
{
|
||||
size_t totalSize = 0;
|
||||
|
||||
auto &objects = (*jsontree)["objects"];
|
||||
auto& objects = jsontree->get_child("objects");
|
||||
if (!objects.empty())
|
||||
{
|
||||
auto& lastObject = objects.back();
|
||||
totalSize = lastObject["offset"].get<off_t>() + lastObject["length"].get<size_t>();
|
||||
auto& lastObject = objects.back().second;
|
||||
totalSize = lastObject.get<off_t>("offset") + lastObject.get<size_t>("length");
|
||||
}
|
||||
return totalSize;
|
||||
}
|
||||
@ -241,9 +243,10 @@ vector<metadataObject> MetadataFile::metadataRead(off_t offset, size_t length) c
|
||||
rather than write a new alg.
|
||||
*/
|
||||
set<metadataObject> mObjects;
|
||||
for(const auto &v : (*jsontree)["objects"])
|
||||
BOOST_FOREACH (const boost::property_tree::ptree::value_type& v, jsontree->get_child("objects"))
|
||||
{
|
||||
mObjects.insert(metadataObject(v["offset"], v["length"], v["key"]));
|
||||
mObjects.insert(metadataObject(v.second.get<uint64_t>("offset"), v.second.get<uint64_t>("length"),
|
||||
v.second.get<string>("key")));
|
||||
}
|
||||
|
||||
if (mObjects.size() == 0)
|
||||
@ -285,20 +288,20 @@ metadataObject MetadataFile::addMetadataObject(const boost::filesystem::path& fi
|
||||
//
|
||||
|
||||
metadataObject addObject;
|
||||
auto& objects = (*jsontree)["objects"];
|
||||
auto& objects = jsontree->get_child("objects");
|
||||
if (!objects.empty())
|
||||
{
|
||||
auto& lastObject = objects.back();
|
||||
addObject.offset = lastObject["offset"].get<off_t>() + mpConfig->mObjectSize;
|
||||
auto& lastObject = objects.back().second;
|
||||
addObject.offset = lastObject.get<off_t>("offset") + mpConfig->mObjectSize;
|
||||
}
|
||||
|
||||
addObject.length = length;
|
||||
addObject.key = getNewKey(filename.string(), addObject.offset, addObject.length);
|
||||
nlohmann::json object = nlohmann::json::object();
|
||||
object["offset"] = addObject.offset;
|
||||
object["length"] = addObject.length;
|
||||
object["key"] = addObject.key;
|
||||
objects.push_back(object);
|
||||
boost::property_tree::ptree object;
|
||||
object.put("offset", addObject.offset);
|
||||
object.put("length", addObject.length);
|
||||
object.put("key", addObject.key);
|
||||
objects.push_back(make_pair("", object));
|
||||
|
||||
return addObject;
|
||||
}
|
||||
@ -309,8 +312,7 @@ int MetadataFile::writeMetadata()
|
||||
if (!boost::filesystem::exists(mFilename.parent_path()))
|
||||
boost::filesystem::create_directories(mFilename.parent_path());
|
||||
|
||||
std::ofstream o(mFilename.c_str());
|
||||
o << *jsontree;
|
||||
write_json(mFilename.string(), *jsontree);
|
||||
_exists = true;
|
||||
|
||||
boost::unique_lock<boost::mutex> s(jsonCache.getMutex());
|
||||
@ -322,13 +324,13 @@ int MetadataFile::writeMetadata()
|
||||
bool MetadataFile::getEntry(off_t offset, metadataObject* out) const
|
||||
{
|
||||
metadataObject addObject;
|
||||
for(auto &v: (*jsontree)["objects"])
|
||||
BOOST_FOREACH (const boost::property_tree::ptree::value_type& v, jsontree->get_child("objects"))
|
||||
{
|
||||
if (v["offset"].get<off_t>() == offset)
|
||||
if (v.second.get<off_t>("offset") == offset)
|
||||
{
|
||||
out->offset = offset;
|
||||
out->length = v["length"].get<size_t>();
|
||||
out->key = v["key"];
|
||||
out->length = v.second.get<size_t>("length");
|
||||
out->key = v.second.get<string>("key");
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -337,10 +339,10 @@ bool MetadataFile::getEntry(off_t offset, metadataObject* out) const
|
||||
|
||||
void MetadataFile::removeEntry(off_t offset)
|
||||
{
|
||||
auto& objects = (*jsontree)["objects"];
|
||||
for (auto it = objects.begin(); it != objects.end(); ++it)
|
||||
bpt::ptree& objects = jsontree->get_child("objects");
|
||||
for (bpt::ptree::iterator it = objects.begin(); it != objects.end(); ++it)
|
||||
{
|
||||
if ((*it)["offset"].get<off_t>() == offset)
|
||||
if (it->second.get<off_t>("offset") == offset)
|
||||
{
|
||||
objects.erase(it);
|
||||
break;
|
||||
@ -350,7 +352,7 @@ void MetadataFile::removeEntry(off_t offset)
|
||||
|
||||
void MetadataFile::removeAllEntries()
|
||||
{
|
||||
(*jsontree)["objects"] = nlohmann::json::array();
|
||||
jsontree->get_child("objects").clear();
|
||||
}
|
||||
|
||||
void MetadataFile::deletedMeta(const bf::path& p)
|
||||
@ -454,21 +456,21 @@ void MetadataFile::setLengthInKey(string& key, size_t newLength)
|
||||
|
||||
void MetadataFile::printObjects() const
|
||||
{
|
||||
for (auto& v : (*jsontree)["objects"])
|
||||
BOOST_FOREACH (const boost::property_tree::ptree::value_type& v, jsontree->get_child("objects"))
|
||||
{
|
||||
printf("Name: %s Length: %zu Offset: %lld\n", v["key"].get<std::string>().c_str(),
|
||||
v["length"].get<size_t>(), (long long)v["offset"].get<off_t>());
|
||||
printf("Name: %s Length: %zu Offset: %lld\n", v.second.get<string>("key").c_str(),
|
||||
v.second.get<size_t>("length"), (long long)v.second.get<off_t>("offset"));
|
||||
}
|
||||
}
|
||||
|
||||
void MetadataFile::updateEntry(off_t offset, const string& newName, size_t newLength)
|
||||
{
|
||||
for (auto& v : (*jsontree)["objects"])
|
||||
for (auto& v : jsontree->get_child("objects"))
|
||||
{
|
||||
if (v["offset"].get<off_t>() == offset)
|
||||
if (v.second.get<off_t>("offset") == offset)
|
||||
{
|
||||
v["key"] = newName;
|
||||
v["length"] = newLength;
|
||||
v.second.put("key", newName);
|
||||
v.second.put("length", newLength);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -480,11 +482,11 @@ void MetadataFile::updateEntry(off_t offset, const string& newName, size_t newLe
|
||||
|
||||
void MetadataFile::updateEntryLength(off_t offset, size_t newLength)
|
||||
{
|
||||
for (auto& v : (*jsontree)["objects"])
|
||||
for (auto& v : jsontree->get_child("objects"))
|
||||
{
|
||||
if (v["offset"].get<off_t>() == offset)
|
||||
if (v.second.get<off_t>("offset") == offset)
|
||||
{
|
||||
v["length"] = newLength;
|
||||
v.second.put("length", newLength);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -496,12 +498,11 @@ void MetadataFile::updateEntryLength(off_t offset, size_t newLength)
|
||||
|
||||
off_t MetadataFile::getMetadataNewObjectOffset()
|
||||
{
|
||||
auto& objects = (*jsontree)["objects"];
|
||||
auto& objects = jsontree->get_child("objects");
|
||||
if (objects.empty())
|
||||
return 0;
|
||||
|
||||
auto& lastObject = objects.back();
|
||||
return lastObject["offset"].get<off_t>() + lastObject["length"].get<size_t>();
|
||||
auto& lastObject = jsontree->get_child("objects").back().second;
|
||||
return lastObject.get<off_t>("offset") + lastObject.get<size_t>("length");
|
||||
}
|
||||
|
||||
metadataObject::metadataObject() : offset(0), length(0)
|
||||
|
@ -28,8 +28,6 @@
|
||||
#include <unordered_map>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
|
||||
#include <utils/json/json.hpp>
|
||||
|
||||
namespace storagemanager
|
||||
{
|
||||
struct metadataObject
|
||||
@ -112,7 +110,7 @@ class MetadataFile
|
||||
|
||||
static void printKPIs();
|
||||
|
||||
typedef boost::shared_ptr<nlohmann::json> Jsontree_t;
|
||||
typedef boost::shared_ptr<boost::property_tree::ptree> Jsontree_t;
|
||||
|
||||
private:
|
||||
MetadataConfig* mpConfig;
|
||||
|
@ -27,12 +27,12 @@
|
||||
#include <errno.h>
|
||||
#include <sys/sendfile.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
#define BOOST_SPIRIT_THREADSAFE
|
||||
#include <boost/property_tree/json_parser.hpp>
|
||||
#include <boost/shared_array.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include <iostream>
|
||||
|
||||
#include <utils/json/json.hpp>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace
|
||||
@ -279,14 +279,12 @@ int Replicator::addJournalEntry(const boost::filesystem::path& filename, const u
|
||||
stringstream ss;
|
||||
ss << headertxt.get();
|
||||
headerRollback = headertxt.get();
|
||||
nlohmann::json header;
|
||||
|
||||
boost::property_tree::ptree header;
|
||||
try
|
||||
{
|
||||
header = nlohmann::json::parse(ss);
|
||||
boost::property_tree::json_parser::read_json(ss, header);
|
||||
}
|
||||
|
||||
catch (const nlohmann::json::exception& e)
|
||||
catch (const boost::property_tree::json_parser::json_parser_error& e)
|
||||
{
|
||||
mpLogger->log(LOG_CRIT, "%s", e.what());
|
||||
errno = EIO;
|
||||
@ -298,8 +296,8 @@ int Replicator::addJournalEntry(const boost::filesystem::path& filename, const u
|
||||
errno = EIO;
|
||||
return -1;
|
||||
}
|
||||
assert(header["version"] == 1);
|
||||
uint64_t currentMaxOffset = header["max_offset"];
|
||||
assert(header.get<int>("version") == 1);
|
||||
uint64_t currentMaxOffset = header.get<uint64_t>("max_offset");
|
||||
if (thisEntryMaxOffset > currentMaxOffset)
|
||||
{
|
||||
bHeaderChanged = true;
|
||||
|
@ -26,9 +26,9 @@
|
||||
#include <boost/uuid/uuid.hpp>
|
||||
#include <boost/uuid/uuid_io.hpp>
|
||||
#include <boost/uuid/random_generator.hpp>
|
||||
|
||||
#include "utils/json/json.hpp"
|
||||
|
||||
#define BOOST_SPIRIT_THREADSAFE
|
||||
#include <boost/property_tree/ptree.hpp>
|
||||
#include <boost/property_tree/json_parser.hpp>
|
||||
#include "Utilities.h"
|
||||
|
||||
using namespace std;
|
||||
@ -52,7 +52,7 @@ static size_t WriteCallback(void* contents, size_t size, size_t nmemb, void* use
|
||||
inline bool retryable_error(uint8_t s3err)
|
||||
{
|
||||
return (s3err == MS3_ERR_RESPONSE_PARSE || s3err == MS3_ERR_REQUEST_ERROR || s3err == MS3_ERR_OOM ||
|
||||
s3err == MS3_ERR_IMPOSSIBLE || s3err == MS3_ERR_AUTH || s3err == MS3_ERR_SERVER ||
|
||||
s3err == MS3_ERR_IMPOSSIBLE || s3err == MS3_ERR_SERVER ||
|
||||
s3err == MS3_ERR_AUTH_ROLE);
|
||||
}
|
||||
|
||||
@ -258,12 +258,12 @@ bool S3Storage::getCredentialsFromMetadataEC2()
|
||||
logger->log(LOG_ERR, "CURL fail %u", curl_res);
|
||||
return false;
|
||||
}
|
||||
|
||||
nlohmann::json pt = nlohmann::json::parse(readBuffer);
|
||||
key = pt["AccessKeyId"];
|
||||
secret = pt["SecretAccessKey"];
|
||||
token = pt["Token"];
|
||||
|
||||
stringstream credentials(readBuffer);
|
||||
boost::property_tree::ptree pt;
|
||||
boost::property_tree::read_json(credentials, pt);
|
||||
key = pt.get<string>("AccessKeyId");
|
||||
secret = pt.get<string>("SecretAccessKey");
|
||||
token = pt.get<string>("Token");
|
||||
// logger->log(LOG_INFO, "S3Storage: key = %s secret = %s token =
|
||||
// %s",key.c_str(),secret.c_str(),token.c_str());
|
||||
|
||||
@ -294,6 +294,12 @@ void S3Storage::testConnectivityAndPerms()
|
||||
err = deleteObject(testObjKey);
|
||||
if (err)
|
||||
FAIL(DELETE)
|
||||
err = exists(testObjKey, &_exists);
|
||||
if (err)
|
||||
{
|
||||
logger->log(LOG_CRIT, "S3Storage::exists() failed on nonexistent object. Check 'ListBucket' permissions.");
|
||||
FAIL(HEAD)
|
||||
}
|
||||
logger->log(LOG_INFO, "S3Storage: S3 connectivity & permissions are OK");
|
||||
}
|
||||
|
||||
|
@ -87,6 +87,7 @@ void catFileOffline(const char* filename, int prefixlen)
|
||||
catch (exception& e)
|
||||
{
|
||||
cerr << "smcat catFileOffline FAIL: " << e.what() << endl;
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -51,6 +51,11 @@ if (WITH_UNITTESTS)
|
||||
target_link_libraries(simd_processors ${ENGINE_LDFLAGS} ${MARIADB_CLIENT_LIBS} ${ENGINE_WRITE_LIBS} ${GTEST_LIBRARIES} processor dbbc)
|
||||
gtest_discover_tests(simd_processors TEST_PREFIX columnstore:)
|
||||
|
||||
add_executable(fair_threadpool_test fair_threadpool.cpp)
|
||||
add_dependencies(fair_threadpool_test googletest)
|
||||
target_link_libraries(fair_threadpool_test ${ENGINE_LDFLAGS} ${MARIADB_CLIENT_LIBS} ${ENGINE_WRITE_LIBS} ${GTEST_LIBRARIES} processor dbbc)
|
||||
gtest_discover_tests(fair_threadpool_test TEST_PREFIX columnstore:)
|
||||
|
||||
# CPPUNIT TESTS
|
||||
add_executable(we_shared_components_tests shared_components_tests.cpp)
|
||||
add_dependencies(we_shared_components_tests loggingcpp)
|
||||
|
173
tests/fair_threadpool.cpp
Normal file
173
tests/fair_threadpool.cpp
Normal file
@ -0,0 +1,173 @@
|
||||
/* Copyright (C) 2022 MariaDB Corporation
|
||||
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; version 2 of
|
||||
the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
MA 02110-1301, USA. */
|
||||
|
||||
#include <iostream>
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
|
||||
#include "utils/threadpool/fair_threadpool.h"
|
||||
|
||||
using namespace primitiveprocessor;
|
||||
using namespace std;
|
||||
using namespace threadpool;
|
||||
|
||||
using ResultsType = std::vector<int>;
|
||||
static ResultsType results;
|
||||
|
||||
class FairThreadPoolTest : public testing::Test {
|
||||
public:
|
||||
|
||||
void SetUp() override
|
||||
{
|
||||
results.clear();
|
||||
threadPool = new FairThreadPool(1, 1, 0, 0);
|
||||
}
|
||||
|
||||
|
||||
FairThreadPool* threadPool;
|
||||
};
|
||||
|
||||
class TestFunctor: public FairThreadPool::Functor
|
||||
{
|
||||
public:
|
||||
TestFunctor(const size_t id, const size_t delay): id_(id), delay_(delay)
|
||||
{
|
||||
}
|
||||
~TestFunctor() {};
|
||||
int operator()() override
|
||||
{
|
||||
usleep(delay_);
|
||||
results.push_back(id_);
|
||||
return 0;
|
||||
}
|
||||
private:
|
||||
size_t id_;
|
||||
size_t delay_;
|
||||
};
|
||||
|
||||
class TestRescheduleFunctor: public FairThreadPool::Functor
|
||||
{
|
||||
public:
|
||||
TestRescheduleFunctor(const size_t id, const size_t delay): id_(id), delay_(delay)
|
||||
{
|
||||
}
|
||||
~TestRescheduleFunctor() {};
|
||||
int operator()() override
|
||||
{
|
||||
if (firstRun)
|
||||
{
|
||||
firstRun = false;
|
||||
return 1; // re-schedule the Job
|
||||
}
|
||||
usleep(delay_);
|
||||
results.push_back(id_);
|
||||
return 0;
|
||||
}
|
||||
private:
|
||||
size_t id_;
|
||||
size_t delay_;
|
||||
bool firstRun = true;
|
||||
};
|
||||
|
||||
testing::AssertionResult isThisOrThat(const ResultsType& arr, const size_t idxA, const int a, const size_t idxB, const int b)
|
||||
{
|
||||
if (arr.empty() || arr.size() <= max(idxA, idxB))
|
||||
return testing::AssertionFailure() << "The supplied vector is either empty or not big enough.";
|
||||
if (arr[idxA] == a && arr[idxB] == b)
|
||||
return testing::AssertionSuccess();
|
||||
if (arr[idxA] == b && arr[idxB] == a)
|
||||
return testing::AssertionSuccess();
|
||||
return testing::AssertionFailure() << "The values at positions "<< idxA << " " << idxB
|
||||
<< " are not " << a << " and " << b << std::endl;
|
||||
}
|
||||
|
||||
TEST_F(FairThreadPoolTest, FairThreadPoolAdd)
|
||||
{
|
||||
SP_UM_IOSOCK sock(new messageqcpp::IOSocket);
|
||||
auto functor1 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(1, 50000));
|
||||
FairThreadPool::Job job1(1, 1, 1, functor1, sock, 1);
|
||||
auto functor2 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(2, 5000));
|
||||
FairThreadPool::Job job2(2, 1, 1, functor2, sock, 1);
|
||||
auto functor3 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(3, 5000));
|
||||
FairThreadPool::Job job3(3, 1, 2, functor3, sock, 1);
|
||||
|
||||
threadPool->addJob(job1);
|
||||
threadPool->addJob(job2);
|
||||
threadPool->addJob(job3);
|
||||
|
||||
while (threadPool->queueSize())
|
||||
{
|
||||
usleep(250000);
|
||||
}
|
||||
|
||||
EXPECT_EQ(threadPool->queueSize(), 0ULL);
|
||||
EXPECT_EQ(results.size(), 3ULL);
|
||||
EXPECT_EQ(results[0], 1);
|
||||
EXPECT_EQ(results[1], 3);
|
||||
EXPECT_EQ(results[2], 2);
|
||||
}
|
||||
|
||||
TEST_F(FairThreadPoolTest, FairThreadPoolRemove)
|
||||
{
|
||||
SP_UM_IOSOCK sock(new messageqcpp::IOSocket);
|
||||
auto functor1 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(1, 100000));
|
||||
FairThreadPool::Job job1(1, 1, 1, functor1, sock, 1, 0, 1);
|
||||
auto functor2 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(2, 50000));
|
||||
FairThreadPool::Job job2(2, 1, 1, functor2, sock, 1, 0, 2);
|
||||
auto functor3 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(3, 50000));
|
||||
FairThreadPool::Job job3(3, 1, 2, functor3, sock, 1, 0, 3);
|
||||
|
||||
threadPool->addJob(job1);
|
||||
threadPool->addJob(job2);
|
||||
threadPool->addJob(job3);
|
||||
threadPool->removeJobs(job2.id_);
|
||||
|
||||
while (threadPool->queueSize())
|
||||
{
|
||||
usleep(250000);
|
||||
}
|
||||
|
||||
EXPECT_EQ(threadPool->queueSize(), 0ULL);
|
||||
EXPECT_EQ(results.size(), 2ULL);
|
||||
EXPECT_EQ(results[0], 1);
|
||||
EXPECT_EQ(results[1], 3);
|
||||
}
|
||||
|
||||
TEST_F(FairThreadPoolTest, FairThreadPoolReschedule)
|
||||
{
|
||||
SP_UM_IOSOCK sock(new messageqcpp::IOSocket);
|
||||
auto functor1 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(1, 100000));
|
||||
FairThreadPool::Job job1(1, 1, 1, functor1, sock, 1, 0, 1);
|
||||
auto functor2 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(2, 50000));
|
||||
FairThreadPool::Job job2(2, 1, 2, functor2, sock, 1, 0, 2);
|
||||
auto functor3 = boost::shared_ptr<FairThreadPool::Functor>(new TestFunctor(3, 50000));
|
||||
FairThreadPool::Job job3(3, 1, 3, functor3, sock, 1, 0, 3);
|
||||
|
||||
threadPool->addJob(job1);
|
||||
threadPool->addJob(job2);
|
||||
threadPool->addJob(job3);
|
||||
|
||||
while (threadPool->queueSize())
|
||||
{
|
||||
usleep(250000);
|
||||
}
|
||||
|
||||
EXPECT_EQ(threadPool->queueSize(), 0ULL);
|
||||
EXPECT_EQ(results.size(), 3ULL);
|
||||
EXPECT_EQ(results[0], 1);
|
||||
EXPECT_TRUE(isThisOrThat(results, 1, 2, 2, 3));
|
||||
}
|
@ -98,7 +98,8 @@ class StatisticsManager
|
||||
std::map<uint32_t, KeyType> keyTypes;
|
||||
StatisticsManager() : epoch(0), version(1)
|
||||
{
|
||||
IDBPolicy::init(true, false, "", 0);
|
||||
// Initialize plugins.
|
||||
IDBPolicy::configIDBPolicy();
|
||||
}
|
||||
std::unique_ptr<char[]> convertStatsToDataStream(uint64_t& dataStreamSize);
|
||||
|
||||
|
@ -1900,8 +1900,8 @@ void RowAggregation::doAvg(const Row& rowIn, int64_t colIn, int64_t colOut, int6
|
||||
// rowIn(in) - Row to be included in aggregation.
|
||||
// colIn(in) - column in the input row group
|
||||
// colOut(in) - column in the output row group stores the count
|
||||
// colAux(in) - column in the output row group stores the sum(x)
|
||||
// colAux + 1 - column in the output row group stores the sum(x**2)
|
||||
// colAux(in) - column in the output row group stores the mean(x)
|
||||
// colAux + 1 - column in the output row group stores the sum(x_i - mean)^2
|
||||
//------------------------------------------------------------------------------
|
||||
void RowAggregation::doStatistics(const Row& rowIn, int64_t colIn, int64_t colOut, int64_t colAux)
|
||||
{
|
||||
@ -1960,9 +1960,17 @@ void RowAggregation::doStatistics(const Row& rowIn, int64_t colIn, int64_t colOu
|
||||
break;
|
||||
}
|
||||
|
||||
fRow.setDoubleField(fRow.getDoubleField(colOut) + 1.0, colOut);
|
||||
fRow.setLongDoubleField(fRow.getLongDoubleField(colAux) + valIn, colAux);
|
||||
fRow.setLongDoubleField(fRow.getLongDoubleField(colAux + 1) + valIn * valIn, colAux + 1);
|
||||
double count = fRow.getDoubleField(colOut) + 1.0;
|
||||
long double mean = fRow.getLongDoubleField(colAux);
|
||||
long double scaledMomentum2 = fRow.getLongDoubleField(colAux + 1);
|
||||
volatile long double delta = valIn - mean;
|
||||
mean += delta/count;
|
||||
scaledMomentum2 += delta * (valIn - mean);
|
||||
|
||||
|
||||
fRow.setDoubleField(count, colOut);
|
||||
fRow.setLongDoubleField(mean, colAux);
|
||||
fRow.setLongDoubleField(scaledMomentum2, colAux + 1);
|
||||
}
|
||||
|
||||
void RowAggregation::mergeStatistics(const Row& rowIn, uint64_t colOut, uint64_t colAux)
|
||||
@ -3156,31 +3164,26 @@ void RowAggregationUM::calculateStatisticsFunctions()
|
||||
}
|
||||
else // count > 1
|
||||
{
|
||||
long double sum1 = fRow.getLongDoubleField(colAux);
|
||||
long double sum2 = fRow.getLongDoubleField(colAux + 1);
|
||||
long double scaledMomentum2 = fRow.getLongDoubleField(colAux + 1);
|
||||
|
||||
uint32_t scale = fRow.getScale(colOut);
|
||||
auto factor = datatypes::scaleDivisor<long double>(scale);
|
||||
|
||||
if (scale != 0) // adjust the scale if necessary
|
||||
{
|
||||
sum1 /= factor;
|
||||
sum2 /= factor * factor;
|
||||
scaledMomentum2 /= factor * factor;
|
||||
}
|
||||
|
||||
long double stat = sum1 * sum1 / cnt;
|
||||
stat = sum2 - stat;
|
||||
|
||||
if (fFunctionCols[i]->fStatsFunction == ROWAGG_STDDEV_POP)
|
||||
stat = sqrt(stat / cnt);
|
||||
scaledMomentum2 = sqrt(scaledMomentum2 / cnt);
|
||||
else if (fFunctionCols[i]->fStatsFunction == ROWAGG_STDDEV_SAMP)
|
||||
stat = sqrt(stat / (cnt - 1));
|
||||
scaledMomentum2 = sqrt(scaledMomentum2 / (cnt - 1));
|
||||
else if (fFunctionCols[i]->fStatsFunction == ROWAGG_VAR_POP)
|
||||
stat = stat / cnt;
|
||||
scaledMomentum2 = scaledMomentum2 / cnt;
|
||||
else if (fFunctionCols[i]->fStatsFunction == ROWAGG_VAR_SAMP)
|
||||
stat = stat / (cnt - 1);
|
||||
scaledMomentum2 = scaledMomentum2 / (cnt - 1);
|
||||
|
||||
fRow.setDoubleField(stat, colOut);
|
||||
fRow.setDoubleField(scaledMomentum2, colOut);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4281,18 +4284,39 @@ void RowAggregationUMP2::doAvg(const Row& rowIn, int64_t colIn, int64_t colOut,
|
||||
// Update the sum and count fields for stattistics if input is not null.
|
||||
// rowIn(in) - Row to be included in aggregation.
|
||||
// colIn(in) - column in the input row group stores the count/logical block
|
||||
// colIn + 1 - column in the input row group stores the sum(x)/logical block
|
||||
// colIn + 2 - column in the input row group stores the sum(x**2)/logical block
|
||||
// colIn + 1 - column in the input row group stores the mean(x)/logical block
|
||||
// colIn + 2 - column in the input row group stores the sum(x_i - mean)^2/logical block
|
||||
// colOut(in) - column in the output row group stores the count
|
||||
// colAux(in) - column in the output row group stores the sum(x)
|
||||
// colAux + 1 - column in the output row group stores the sum(x**2)
|
||||
// colAux(in) - column in the output row group stores the mean(x)
|
||||
// colAux + 1 - column in the output row group stores the sum(x_i - mean)^2
|
||||
//------------------------------------------------------------------------------
|
||||
void RowAggregationUMP2::doStatistics(const Row& rowIn, int64_t colIn, int64_t colOut, int64_t colAux)
|
||||
{
|
||||
fRow.setDoubleField(fRow.getDoubleField(colOut) + rowIn.getDoubleField(colIn), colOut);
|
||||
fRow.setLongDoubleField(fRow.getLongDoubleField(colAux) + rowIn.getLongDoubleField(colIn + 1), colAux);
|
||||
fRow.setLongDoubleField(fRow.getLongDoubleField(colAux + 1) + rowIn.getLongDoubleField(colIn + 2),
|
||||
colAux + 1);
|
||||
double count = fRow.getDoubleField(colOut);
|
||||
long double mean = fRow.getLongDoubleField(colAux);
|
||||
long double scaledMomentum2 = fRow.getLongDoubleField(colAux + 1);
|
||||
|
||||
double blockCount = rowIn.getDoubleField(colIn);
|
||||
long double blockMean = rowIn.getLongDoubleField(colIn + 1);
|
||||
long double blockScaledMomentum2 = rowIn.getLongDoubleField(colIn + 2);
|
||||
|
||||
double nextCount = count + blockCount;
|
||||
long double nextMean;
|
||||
long double nextScaledMomentum2;
|
||||
if (nextCount == 0)
|
||||
{
|
||||
nextMean = 0;
|
||||
nextScaledMomentum2 = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
volatile long double delta = mean - blockMean;
|
||||
nextMean = (mean * count + blockMean * blockCount) / nextCount;
|
||||
nextScaledMomentum2 = scaledMomentum2 + blockScaledMomentum2 + delta * delta * (count * blockCount / nextCount);
|
||||
}
|
||||
fRow.setDoubleField(nextCount, colOut);
|
||||
fRow.setLongDoubleField(nextMean, colAux);
|
||||
fRow.setLongDoubleField(nextScaledMomentum2, colAux + 1);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
@ -4,11 +4,8 @@ include_directories( ${ENGINE_COMMON_INCLUDES} )
|
||||
|
||||
########### next target ###############
|
||||
|
||||
set(threadpool_LIB_SRCS weightedthreadpool.cpp threadpool.cpp prioritythreadpool.cpp)
|
||||
|
||||
set(threadpool_LIB_SRCS weightedthreadpool.cpp threadpool.cpp prioritythreadpool.cpp fair_threadpool.cpp)
|
||||
add_library(threadpool SHARED ${threadpool_LIB_SRCS})
|
||||
|
||||
add_dependencies(threadpool loggingcpp)
|
||||
target_link_libraries(threadpool Boost::chrono)
|
||||
|
||||
install(TARGETS threadpool DESTINATION ${ENGINE_LIBDIR} COMPONENT columnstore-engine)
|
295
utils/threadpool/fair_threadpool.cpp
Normal file
295
utils/threadpool/fair_threadpool.cpp
Normal file
@ -0,0 +1,295 @@
|
||||
/* Copyright (c) 2022 MariaDB Corporation
|
||||
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; version 2 of
|
||||
the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
MA 02110-1301, USA. */
|
||||
|
||||
#include <atomic>
|
||||
#include <stdexcept>
|
||||
#include <unistd.h>
|
||||
#include <exception>
|
||||
using namespace std;
|
||||
|
||||
#include "messageobj.h"
|
||||
#include "messagelog.h"
|
||||
#include "threadnaming.h"
|
||||
using namespace logging;
|
||||
|
||||
#include "fair_threadpool.h"
|
||||
using namespace boost;
|
||||
|
||||
#include "dbcon/joblist/primitivemsg.h"
|
||||
|
||||
namespace threadpool
|
||||
{
|
||||
FairThreadPool::FairThreadPool(uint targetWeightPerRun, uint highThreads, uint midThreads, uint lowThreads,
|
||||
uint ID)
|
||||
: weightPerRun(targetWeightPerRun), id(ID)
|
||||
{
|
||||
boost::thread* newThread;
|
||||
size_t numberOfThreads = highThreads + midThreads + lowThreads;
|
||||
for (uint32_t i = 0; i < numberOfThreads; ++i)
|
||||
{
|
||||
newThread = threads.create_thread(ThreadHelper(this, PriorityThreadPool::Priority::HIGH));
|
||||
newThread->detach();
|
||||
}
|
||||
cout << "FairThreadPool started " << numberOfThreads << " thread/-s.\n";
|
||||
threadCounts_.store(numberOfThreads, std::memory_order_relaxed);
|
||||
defaultThreadCounts = numberOfThreads;
|
||||
}
|
||||
|
||||
FairThreadPool::~FairThreadPool()
|
||||
{
|
||||
stop();
|
||||
}
|
||||
|
||||
void FairThreadPool::addJob(const Job& job)
|
||||
{
|
||||
addJob_(job);
|
||||
}
|
||||
|
||||
void FairThreadPool::addJob_(const Job& job, bool useLock)
|
||||
{
|
||||
boost::thread* newThread;
|
||||
std::unique_lock<std::mutex> lk(mutex, std::defer_lock_t());
|
||||
|
||||
// Create any missing threads
|
||||
if (defaultThreadCounts != threadCounts_.load(std::memory_order_relaxed))
|
||||
{
|
||||
newThread = threads.create_thread(ThreadHelper(this, PriorityThreadPool::Priority::HIGH));
|
||||
newThread->detach();
|
||||
threadCounts_.fetch_add(1, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
if (useLock)
|
||||
lk.lock();
|
||||
|
||||
auto jobsListMapIter = txn2JobsListMap_.find(job.txnIdx_);
|
||||
if (jobsListMapIter == txn2JobsListMap_.end()) // there is no txn in the map
|
||||
{
|
||||
ThreadPoolJobsList* jobsList = new ThreadPoolJobsList;
|
||||
jobsList->push_back(job);
|
||||
txn2JobsListMap_[job.txnIdx_] = jobsList;
|
||||
weightedTxnsQueue_.push({job.weight_, job.txnIdx_});
|
||||
}
|
||||
else // txn is in the map
|
||||
{
|
||||
if (jobsListMapIter->second->empty()) // there are no jobs for the txn
|
||||
{
|
||||
weightedTxnsQueue_.push({job.weight_, job.txnIdx_});
|
||||
}
|
||||
jobsListMapIter->second->push_back(job);
|
||||
}
|
||||
|
||||
if (useLock)
|
||||
newJob.notify_one();
|
||||
}
|
||||
|
||||
void FairThreadPool::removeJobs(uint32_t id)
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(mutex);
|
||||
|
||||
for (auto& txnJobsMapPair : txn2JobsListMap_)
|
||||
{
|
||||
ThreadPoolJobsList* txnJobsList = txnJobsMapPair.second;
|
||||
auto job = txnJobsList->begin();
|
||||
while (job != txnJobsList->end())
|
||||
{
|
||||
if (job->id_ == id)
|
||||
{
|
||||
job = txnJobsList->erase(job); // update the job iter
|
||||
if (txnJobsList->empty())
|
||||
{
|
||||
txn2JobsListMap_.erase(txnJobsMapPair.first);
|
||||
delete txnJobsList;
|
||||
break;
|
||||
// There is no clean-up for PQ. It will happen later in threadFcn
|
||||
}
|
||||
continue; // go-on skiping job iter increment
|
||||
}
|
||||
++job;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void FairThreadPool::threadFcn(const PriorityThreadPool::Priority preferredQueue)
|
||||
{
|
||||
utils::setThreadName("Idle");
|
||||
RunListT runList(1); // This is a vector to allow to grab multiple jobs
|
||||
RescheduleVecType reschedule;
|
||||
bool running = false;
|
||||
bool rescheduleJob = false;
|
||||
|
||||
try
|
||||
{
|
||||
while (!stop_.load(std::memory_order_relaxed))
|
||||
{
|
||||
runList.clear(); // remove the job
|
||||
std::unique_lock<std::mutex> lk(mutex);
|
||||
|
||||
if (weightedTxnsQueue_.empty())
|
||||
{
|
||||
newJob.wait(lk);
|
||||
continue; // just go on w/o re-taking the lock
|
||||
}
|
||||
|
||||
WeightedTxnT weightedTxn = weightedTxnsQueue_.top();
|
||||
auto txnAndJobListPair = txn2JobsListMap_.find(weightedTxn.second);
|
||||
// Looking for non-empty jobsList in a loop
|
||||
// The loop waits on newJob cond_var if PQ is empty(no jobs in this thread pool)
|
||||
while (txnAndJobListPair == txn2JobsListMap_.end() || txnAndJobListPair->second->empty())
|
||||
{
|
||||
// JobList is empty. This can happen when this method pops the last Job.
|
||||
if (txnAndJobListPair != txn2JobsListMap_.end())
|
||||
{
|
||||
ThreadPoolJobsList* txnJobsList = txnAndJobListPair->second;
|
||||
delete txnJobsList;
|
||||
txn2JobsListMap_.erase(txnAndJobListPair->first);
|
||||
}
|
||||
weightedTxnsQueue_.pop();
|
||||
if (weightedTxnsQueue_.empty()) // remove the empty
|
||||
{
|
||||
break;
|
||||
}
|
||||
weightedTxn = weightedTxnsQueue_.top();
|
||||
txnAndJobListPair = txn2JobsListMap_.find(weightedTxn.second);
|
||||
}
|
||||
|
||||
if (weightedTxnsQueue_.empty())
|
||||
{
|
||||
newJob.wait(lk); // might need a lock here
|
||||
continue;
|
||||
}
|
||||
|
||||
// We have non-empty jobsList at this point.
|
||||
// Remove the txn from a queue first to add it later
|
||||
weightedTxnsQueue_.pop();
|
||||
TransactionIdxT txnIdx = txnAndJobListPair->first;
|
||||
ThreadPoolJobsList* jobsList = txnAndJobListPair->second;
|
||||
runList.push_back(jobsList->front());
|
||||
|
||||
jobsList->pop_front();
|
||||
// Add the jobList back into the PQ adding some weight to it
|
||||
// Current algo doesn't reduce total txn weight if the job is rescheduled.
|
||||
if (!jobsList->empty())
|
||||
{
|
||||
weightedTxnsQueue_.push({weightedTxn.first + runList[0].weight_, txnIdx});
|
||||
}
|
||||
|
||||
lk.unlock();
|
||||
|
||||
running = true;
|
||||
jobsRunning_.fetch_add(1, std::memory_order_relaxed);
|
||||
rescheduleJob = (*(runList[0].functor_))(); // run the functor
|
||||
jobsRunning_.fetch_sub(1, std::memory_order_relaxed);
|
||||
running = false;
|
||||
|
||||
utils::setThreadName("Idle");
|
||||
|
||||
if (rescheduleJob)
|
||||
{
|
||||
// to avoid excessive CPU usage waiting for data from storage
|
||||
usleep(500);
|
||||
lk.lock();
|
||||
addJob_(runList[0], false);
|
||||
newJob.notify_one();
|
||||
lk.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (std::exception& ex)
|
||||
{
|
||||
if (running)
|
||||
{
|
||||
jobsRunning_.fetch_sub(1, std::memory_order_relaxed);
|
||||
}
|
||||
// Log the exception and exit this thread
|
||||
try
|
||||
{
|
||||
threadCounts_.fetch_sub(1, std::memory_order_relaxed);
|
||||
#ifndef NOLOGGING
|
||||
logging::Message::Args args;
|
||||
logging::Message message(5);
|
||||
args.add("threadFcn: Caught exception: ");
|
||||
args.add(ex.what());
|
||||
|
||||
message.format(args);
|
||||
|
||||
logging::LoggingID lid(22);
|
||||
logging::MessageLog ml(lid);
|
||||
|
||||
ml.logErrorMessage(message);
|
||||
#endif
|
||||
|
||||
if (running)
|
||||
sendErrorMsg(runList[0].uniqueID_, runList[0].stepID_, runList[0].sock_);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
// Log the exception and exit this thread
|
||||
try
|
||||
{
|
||||
if (running)
|
||||
{
|
||||
jobsRunning_.fetch_sub(1, std::memory_order_relaxed);
|
||||
}
|
||||
threadCounts_.fetch_sub(1, std::memory_order_relaxed);
|
||||
;
|
||||
#ifndef NOLOGGING
|
||||
logging::Message::Args args;
|
||||
logging::Message message(6);
|
||||
args.add("threadFcn: Caught unknown exception!");
|
||||
|
||||
message.format(args);
|
||||
|
||||
logging::LoggingID lid(22);
|
||||
logging::MessageLog ml(lid);
|
||||
|
||||
ml.logErrorMessage(message);
|
||||
#endif
|
||||
|
||||
if (running)
|
||||
sendErrorMsg(runList[0].uniqueID_, runList[0].stepID_, runList[0].sock_);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void FairThreadPool::sendErrorMsg(uint32_t id, uint32_t step, primitiveprocessor::SP_UM_IOSOCK sock)
|
||||
{
|
||||
ISMPacketHeader ism;
|
||||
PrimitiveHeader ph = {0, 0, 0, 0, 0, 0};
|
||||
|
||||
ism.Status = logging::primitiveServerErr;
|
||||
ph.UniqueID = id;
|
||||
ph.StepID = step;
|
||||
messageqcpp::ByteStream msg(sizeof(ISMPacketHeader) + sizeof(PrimitiveHeader));
|
||||
msg.append((uint8_t*)&ism, sizeof(ism));
|
||||
msg.append((uint8_t*)&ph, sizeof(ph));
|
||||
|
||||
sock->write(msg);
|
||||
}
|
||||
|
||||
// Requests pool shutdown by raising the atomic stop flag.
// NOTE(review): the visible code only sets the flag; worker threads are
// expected to poll stop_ in threadFcn's loop and exit — confirm in the loop
// head, which is outside this view. A relaxed store is used because the flag
// publishes no other data.
void FairThreadPool::stop()
{
  stop_.store(true, std::memory_order_relaxed);
}
|
||||
|
||||
} // namespace threadpool
|
165
utils/threadpool/fair_threadpool.h
Normal file
165
utils/threadpool/fair_threadpool.h
Normal file
@ -0,0 +1,165 @@
|
||||
/* Copyright (c) 2022 MariaDB Corporation
|
||||
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; version 2 of
|
||||
the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
MA 02110-1301, USA. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <condition_variable>
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <sstream>
|
||||
#include <stdexcept>
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/thread/condition.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
#include <boost/function.hpp>
|
||||
#include <atomic>
|
||||
#include <queue>
|
||||
#include <unordered_map>
|
||||
#include <list>
|
||||
#include <functional>
|
||||
|
||||
#include "primitives/primproc/umsocketselector.h"
|
||||
#include "prioritythreadpool.h"
|
||||
|
||||
namespace threadpool
|
||||
{
|
||||
// The idea of this thread pool is to run morsel (primitive) jobs so that CPU time is
// distributed equally between multiple parallel queries (a thread maps a morsel to its
// query using txnId). Each query (txnId) has a weight stored in a priority queue, which
// a thread increases before running another morsel for that query. When a query is done
// (its ThreadPoolJobsList is empty) it is removed from the priority queue and from the
// txn-to-ThreadPoolJobsList map.
// Running multiple morsels per loop iteration in ::threadFcn was also tested: that
// approach reduces CPU consumption but increases query timings.
|
||||
class FairThreadPool
|
||||
{
|
||||
public:
|
||||
using Functor = PriorityThreadPool::Functor;
|
||||
|
||||
using TransactionIdxT = uint32_t;
|
||||
struct Job
|
||||
{
|
||||
Job() : weight_(1), priority_(0), id_(0)
|
||||
{
|
||||
}
|
||||
Job(const uint32_t uniqueID, const uint32_t stepID, const TransactionIdxT txnIdx,
|
||||
const boost::shared_ptr<Functor>& functor, const primitiveprocessor::SP_UM_IOSOCK& sock,
|
||||
const uint32_t weight = 1, const uint32_t priority = 0, const uint32_t id = 0)
|
||||
: uniqueID_(uniqueID)
|
||||
, stepID_(stepID)
|
||||
, txnIdx_(txnIdx)
|
||||
, functor_(functor)
|
||||
, sock_(sock)
|
||||
, weight_(weight)
|
||||
, priority_(priority)
|
||||
, id_(id)
|
||||
{
|
||||
}
|
||||
uint32_t uniqueID_;
|
||||
uint32_t stepID_;
|
||||
TransactionIdxT txnIdx_;
|
||||
boost::shared_ptr<Functor> functor_;
|
||||
primitiveprocessor::SP_UM_IOSOCK sock_;
|
||||
uint32_t weight_;
|
||||
uint32_t priority_;
|
||||
uint32_t id_;
|
||||
};
|
||||
|
||||
/*********************************************
|
||||
* ctor/dtor
|
||||
*
|
||||
*********************************************/
|
||||
|
||||
/** @brief ctor
|
||||
*/
|
||||
|
||||
FairThreadPool(uint targetWeightPerRun, uint highThreads, uint midThreads, uint lowThreads, uint id = 0);
|
||||
virtual ~FairThreadPool();
|
||||
|
||||
void removeJobs(uint32_t id);
|
||||
void addJob(const Job& job);
|
||||
void stop();
|
||||
|
||||
/** @brief for use in debugging
|
||||
*/
|
||||
void dump();
|
||||
|
||||
size_t queueSize() const
|
||||
{
|
||||
return weightedTxnsQueue_.size();
|
||||
}
|
||||
// This method enables a pool current workload estimate.
|
||||
size_t jobsRunning() const
|
||||
{
|
||||
return jobsRunning_.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
protected:
|
||||
private:
|
||||
struct ThreadHelper
|
||||
{
|
||||
ThreadHelper(FairThreadPool* impl, PriorityThreadPool::Priority queue) : ptp(impl), preferredQueue(queue)
|
||||
{
|
||||
}
|
||||
void operator()()
|
||||
{
|
||||
ptp->threadFcn(preferredQueue);
|
||||
}
|
||||
FairThreadPool* ptp;
|
||||
PriorityThreadPool::Priority preferredQueue;
|
||||
};
|
||||
|
||||
explicit FairThreadPool();
|
||||
explicit FairThreadPool(const FairThreadPool&);
|
||||
FairThreadPool& operator=(const FairThreadPool&);
|
||||
|
||||
void addJob_(const Job& job, bool useLock = true);
|
||||
void threadFcn(const PriorityThreadPool::Priority preferredQueue);
|
||||
void sendErrorMsg(uint32_t id, uint32_t step, primitiveprocessor::SP_UM_IOSOCK sock);
|
||||
|
||||
uint32_t defaultThreadCounts;
|
||||
std::mutex mutex;
|
||||
std::condition_variable newJob;
|
||||
boost::thread_group threads;
|
||||
uint32_t weightPerRun;
|
||||
volatile uint id; // prevent it from being optimized out
|
||||
|
||||
using WeightT = uint32_t;
|
||||
using WeightedTxnT = std::pair<WeightT, TransactionIdxT>;
|
||||
using WeightedTxnVec = std::vector<WeightedTxnT>;
|
||||
struct PrioQueueCmp
|
||||
{
|
||||
bool operator()(WeightedTxnT lhs, WeightedTxnT rhs)
|
||||
{
|
||||
if (lhs.first == rhs.first)
|
||||
return lhs.second > rhs.second;
|
||||
return lhs.first > rhs.first;
|
||||
}
|
||||
};
|
||||
using RunListT = std::vector<Job>;
|
||||
using RescheduleVecType = std::vector<bool>;
|
||||
using WeightedTxnPrioQueue = std::priority_queue<WeightedTxnT, WeightedTxnVec, PrioQueueCmp>;
|
||||
using ThreadPoolJobsList = std::list<Job>;
|
||||
using Txn2ThreadPoolJobsListMap = std::unordered_map<TransactionIdxT, ThreadPoolJobsList*>;
|
||||
Txn2ThreadPoolJobsListMap txn2JobsListMap_;
|
||||
WeightedTxnPrioQueue weightedTxnsQueue_;
|
||||
std::atomic<size_t> jobsRunning_{0};
|
||||
std::atomic<size_t> threadCounts_{0};
|
||||
std::atomic<bool> stop_{false};
|
||||
};
|
||||
|
||||
} // namespace threadpool
|
@ -53,8 +53,6 @@ class PriorityThreadPool
|
||||
virtual int operator()() = 0;
|
||||
};
|
||||
|
||||
// typedef boost::function0<int> Functor;
|
||||
|
||||
struct Job
|
||||
{
|
||||
Job() : weight(1), priority(0), id(0)
|
||||
|
@ -139,10 +139,10 @@ WindowFunctionType* WF_stats<T>::clone() const
|
||||
template <typename T>
|
||||
void WF_stats<T>::resetData()
|
||||
{
|
||||
fSum1 = 0;
|
||||
fSum2 = 0;
|
||||
fCount = 0;
|
||||
fStats = 0.0;
|
||||
mean_ = 0;
|
||||
scaledMomentum2_ = 0;
|
||||
count_ = 0;
|
||||
stats_ = 0.0;
|
||||
|
||||
WindowFunctionType::resetData();
|
||||
}
|
||||
@ -171,51 +171,46 @@ void WF_stats<T>::operator()(int64_t b, int64_t e, int64_t c)
|
||||
|
||||
if (fRow.isNullValue(colIn) == true)
|
||||
continue;
|
||||
|
||||
// Welford's single-pass algorithm
|
||||
T valIn;
|
||||
getValue(colIn, valIn, &cdt);
|
||||
long double val = (long double)valIn;
|
||||
|
||||
fSum1 += val;
|
||||
fSum2 += val * val;
|
||||
fCount++;
|
||||
count_++;
|
||||
long double delta = val - mean_;
|
||||
mean_ += delta/count_;
|
||||
scaledMomentum2_ += delta * (val - mean_);
|
||||
}
|
||||
|
||||
if (fCount > 1)
|
||||
if (count_ > 1)
|
||||
{
|
||||
uint32_t scale = fRow.getScale(colIn);
|
||||
auto factor = datatypes::scaleDivisor<long double>(scale);
|
||||
long double ldSum1 = fSum1;
|
||||
long double ldSum2 = fSum2;
|
||||
long double stat = scaledMomentum2_;
|
||||
|
||||
// adjust the scale if necessary
|
||||
if (scale != 0 && cdt != CalpontSystemCatalog::LONGDOUBLE)
|
||||
{
|
||||
ldSum1 /= factor;
|
||||
ldSum2 /= factor * factor;
|
||||
stat /= factor * factor;
|
||||
}
|
||||
|
||||
long double stat = ldSum1 * ldSum1 / fCount;
|
||||
stat = ldSum2 - stat;
|
||||
|
||||
if (fFunctionId == WF__STDDEV_POP)
|
||||
stat = sqrt(stat / fCount);
|
||||
stat = sqrt(stat / count_);
|
||||
else if (fFunctionId == WF__STDDEV_SAMP)
|
||||
stat = sqrt(stat / (fCount - 1));
|
||||
stat = sqrt(stat / (count_ - 1));
|
||||
else if (fFunctionId == WF__VAR_POP)
|
||||
stat = stat / fCount;
|
||||
stat = stat / count_;
|
||||
else if (fFunctionId == WF__VAR_SAMP)
|
||||
stat = stat / (fCount - 1);
|
||||
stat = stat / (count_ - 1);
|
||||
|
||||
fStats = (double)stat;
|
||||
stats_ = (double)stat;
|
||||
}
|
||||
}
|
||||
|
||||
if (fCount == 0)
|
||||
if (count_ == 0)
|
||||
{
|
||||
setValue(CalpontSystemCatalog::DOUBLE, b, e, c, (double*)NULL);
|
||||
}
|
||||
else if (fCount == 1)
|
||||
else if (count_ == 1)
|
||||
{
|
||||
if (fFunctionId == WF__STDDEV_SAMP || fFunctionId == WF__VAR_SAMP)
|
||||
{
|
||||
@ -229,7 +224,7 @@ void WF_stats<T>::operator()(int64_t b, int64_t e, int64_t c)
|
||||
}
|
||||
else
|
||||
{
|
||||
setValue(CalpontSystemCatalog::DOUBLE, b, e, c, &fStats);
|
||||
setValue(CalpontSystemCatalog::DOUBLE, b, e, c, &stats_);
|
||||
}
|
||||
|
||||
fPrev = c;
|
||||
|
@ -40,10 +40,10 @@ class WF_stats : public WindowFunctionType
|
||||
static boost::shared_ptr<WindowFunctionType> makeFunction(int, const string&, int, WindowFunctionColumn*);
|
||||
|
||||
protected:
|
||||
long double fSum1;
|
||||
long double fSum2;
|
||||
uint64_t fCount;
|
||||
double fStats;
|
||||
long double mean_;
|
||||
long double scaledMomentum2_;
|
||||
uint64_t count_;
|
||||
double stats_;
|
||||
};
|
||||
|
||||
} // namespace windowfunction
|
||||
|
Reference in New Issue
Block a user