
Merge pull request #504 from mariadb-corporation/1.1-merge-up-20180621

Merge develop-1.1 into develop
This commit is contained in:
Roman Nozdrin
2018-07-04 22:52:04 +03:00
committed by GitHub
54 changed files with 1905 additions and 1010 deletions

View File

@@ -37,43 +37,56 @@ DROP PROCEDURE IF EXISTS `table_usage` //
CREATE PROCEDURE table_usage (IN t_schema char(64), IN t_name char(64))
`table_usage`: BEGIN
DECLARE done INTEGER DEFAULT 0;
DECLARE dbname VARCHAR(64);
DECLARE tbname VARCHAR(64);
DECLARE object_ids TEXT;
DECLARE dictionary_object_ids TEXT;
DECLARE `locker` TINYINT UNSIGNED DEFAULT IS_USED_LOCK('table_usage');
DECLARE columns_list CURSOR FOR SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(object_id) OBJECT_IDS, GROUP_CONCAT(dictionary_object_id) DICT_OBJECT_IDS FROM INFORMATION_SCHEMA.COLUMNSTORE_COLUMNS WHERE table_name = t_name and table_schema = t_schema GROUP BY table_schema, table_name;
DECLARE columns_list_sc CURSOR FOR SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(object_id) OBJECT_IDS, GROUP_CONCAT(dictionary_object_id) DICT_OBJECT_IDS FROM INFORMATION_SCHEMA.COLUMNSTORE_COLUMNS WHERE table_schema = t_schema GROUP BY table_schema, table_name;
DECLARE columns_list_all CURSOR FOR SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(object_id) OBJECT_IDS, GROUP_CONCAT(dictionary_object_id) DICT_OBJECT_IDS FROM INFORMATION_SCHEMA.COLUMNSTORE_COLUMNS GROUP BY table_schema, table_name;
DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = 1;
IF `locker` IS NOT NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'Error acquiring table_usage lock';
LEAVE `table_usage`;
END IF;
DO GET_LOCK('table_usage', 0);
DROP TABLE IF EXISTS columnstore_info.columnstore_columns;
DROP TABLE IF EXISTS columnstore_info.columnstore_files;
CREATE TABLE columnstore_info.columnstore_columns engine=myisam as (select * from information_schema.columnstore_columns);
ALTER TABLE columnstore_info.columnstore_columns ADD INDEX `object_id` (`object_id`);
ALTER TABLE columnstore_info.columnstore_columns ADD INDEX `dictionary_object_id` (`dictionary_object_id`);
CREATE TABLE columnstore_info.columnstore_files engine=myisam as (select * from information_schema.columnstore_files);
ALTER TABLE columnstore_info.columnstore_files ADD INDEX `object_id` (`object_id`);
CREATE TEMPORARY TABLE columnstore_info.columnstore_files (TABLE_SCHEMA VARCHAR(64), TABLE_NAME VARCHAR(64), DATA BIGINT, DICT BIGINT);
IF t_name IS NOT NULL THEN
SELECT TABLE_SCHEMA, TABLE_NAME, columnstore_info.format_filesize(data) as DATA_DISK_USAGE, columnstore_info.format_filesize(dict) as DICT_DISK_USAGE, columnstore_info.format_filesize(data + COALESCE(dict, 0)) as TOTAL_USAGE FROM (
SELECT TABLE_SCHEMA, TABLE_NAME, (SELECT sum(cf.file_size) as data FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema) as data, (SELECT sum(cf.file_size) as dict FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.dictionary_object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema GROUP BY table_schema, table_name) as dict
FROM
columnstore_info.columnstore_columns ics where table_name = t_name and (table_schema = t_schema or t_schema IS NULL)
group by table_schema, table_name
) q;
OPEN columns_list;
ELSEIF t_schema IS NOT NULL THEN
SELECT TABLE_SCHEMA, TABLE_NAME, columnstore_info.format_filesize(data) as DATA_DISK_USAGE, columnstore_info.format_filesize(dict) as DICT_DISK_USAGE, columnstore_info.format_filesize(data + COALESCE(dict, 0)) as TOTAL_USAGE FROM (
SELECT TABLE_SCHEMA, TABLE_NAME, (SELECT sum(cf.file_size) as data FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema) as data, (SELECT sum(cf.file_size) as dict FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.dictionary_object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema GROUP BY table_schema, table_name) as dict
FROM
columnstore_info.columnstore_columns ics where table_schema = t_schema
group by table_schema, table_name
) q;
OPEN columns_list_sc;
ELSE
SELECT TABLE_SCHEMA, TABLE_NAME, columnstore_info.format_filesize(data) as DATA_DISK_USAGE, columnstore_info.format_filesize(dict) as DICT_DISK_USAGE, columnstore_info.format_filesize(data + COALESCE(dict, 0)) as TOTAL_USAGE FROM (
SELECT TABLE_SCHEMA, TABLE_NAME, (SELECT sum(cf.file_size) as data FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema) as data, (SELECT sum(cf.file_size) as dict FROM columnstore_info.columnstore_columns cc JOIN columnstore_info.columnstore_files cf ON cc.dictionary_object_id = cf.object_id WHERE table_name = ics.table_name and table_schema = ics.table_schema GROUP BY table_schema, table_name) as dict
FROM
columnstore_info.columnstore_columns ics
group by table_schema, table_name
) q;
OPEN columns_list_all;
END IF;
DROP TABLE IF EXISTS columnstore_info.columnstore_columns;
files_table: LOOP
IF t_name IS NOT NULL THEN
FETCH columns_list INTO dbname, tbname, object_ids, dictionary_object_ids;
ELSEIF t_schema IS NOT NULL THEN
FETCH columns_list_sc INTO dbname, tbname, object_ids, dictionary_object_ids;
ELSE
FETCH columns_list_all INTO dbname, tbname, object_ids, dictionary_object_ids;
END IF;
IF done = 1 THEN LEAVE files_table;
END IF;
INSERT INTO columnstore_info.columnstore_files (SELECT dbname, tbname, sum(file_size), 0 FROM information_schema.columnstore_files WHERE find_in_set(object_id, object_ids));
IF dictionary_object_ids IS NOT NULL THEN
UPDATE columnstore_info.columnstore_files SET DICT = (SELECT sum(file_size) FROM information_schema.columnstore_files WHERE find_in_set(object_id, dictionary_object_ids)) WHERE TABLE_SCHEMA = dbname AND TABLE_NAME = tbname;
END IF;
END LOOP;
IF t_name IS NOT NULL THEN
CLOSE columns_list;
ELSEIF t_schema IS NOT NULL THEN
CLOSE columns_list_sc;
ELSE
CLOSE columns_list_all;
END IF;
SELECT TABLE_SCHEMA, TABLE_NAME, columnstore_info.format_filesize(DATA) as DATA_DISK_USAGE, columnstore_info.format_filesize(DICT) as DICT_DATA_USAGE, columnstore_info.format_filesize(DATA + COALESCE(DICT, 0)) as TOTAL_USAGE FROM columnstore_info.columnstore_files;
DROP TABLE IF EXISTS columnstore_info.columnstore_files;
DO RELEASE_LOCK('table_usage');
END //
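
The procedure above reports per-table data and dictionary disk usage, formatted through columnstore_info.format_filesize(). A minimal usage sketch, assuming a hypothetical schema test with a table orders (any existing ColumnStore objects behave the same way):

CALL columnstore_info.table_usage('test', 'orders');   -- one table
CALL columnstore_info.table_usage('test', NULL);       -- every table in the schema
CALL columnstore_info.table_usage(NULL, NULL);         -- all ColumnStore tables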

View File

@@ -899,6 +899,7 @@ int ha_calpont_impl_write_batch_row_(uchar* buf, TABLE* table, cal_impl_if::cal_
{
fprintf(ci.filePtr, "-");
}
if (!ltime.second_part)
{
fprintf(ci.filePtr, "%02d:%02d:%02d%c",

View File

@@ -4039,8 +4039,8 @@ ParseTree* buildParseTree(Item_func* item, gp_walk_info& gwi, bool& nonSupport)
ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
{
// MCOL-1201 For UDAnF multiple parameters
vector<SRCP> selCols;
vector<SRCP> orderCols;
vector<SRCP> selCols;
vector<SRCP> orderCols;
if (!(gwi.thd->infinidb_vtable.cal_conn_info))
gwi.thd->infinidb_vtable.cal_conn_info = (void*)(new cal_connection_info());
@@ -4059,6 +4059,7 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
// N.B. argument_count() is the # of formal parms to the agg fcn. InfiniDB only supports 1 argument
// TODO: Support more than one parm
#if 0
if (isp->argument_count() != 1 && isp->sum_func() != Item_sum::GROUP_CONCAT_FUNC
&& isp->sum_func() != Item_sum::UDF_SUM_FUNC)
{
@@ -4066,6 +4067,7 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_MUL_ARG_AGG);
return NULL;
}
#endif
AggregateColumn* ac = NULL;
@@ -4089,446 +4091,459 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
{
gwi.fatalParseError = true;
gwi.parseErrorText = "Non supported aggregate type on the select clause";
if (ac)
delete ac;
return NULL;
}
try
{
// special parsing for group_concat
if (isp->sum_func() == Item_sum::GROUP_CONCAT_FUNC)
{
Item_func_group_concat* gc = (Item_func_group_concat*)isp;
vector<SRCP> orderCols;
RowColumn* rowCol = new RowColumn();
vector<SRCP> selCols;
uint32_t select_ctn = gc->count_field();
ReturnedColumn* rc = NULL;
for (uint32_t i = 0; i < select_ctn; i++)
// special parsing for group_concat
if (isp->sum_func() == Item_sum::GROUP_CONCAT_FUNC)
{
rc = buildReturnedColumn(sfitempp[i], gwi, gwi.fatalParseError);
Item_func_group_concat* gc = (Item_func_group_concat*)isp;
vector<SRCP> orderCols;
RowColumn* rowCol = new RowColumn();
vector<SRCP> selCols;
if (!rc || gwi.fatalParseError)
{
if (ac)
delete ac;
return NULL;
}
uint32_t select_ctn = gc->count_field();
ReturnedColumn* rc = NULL;
selCols.push_back(SRCP(rc));
}
ORDER** order_item, **end;
for (order_item = gc->get_order(),
end = order_item + gc->order_field(); order_item < end;
order_item++)
{
Item* ord_col = *(*order_item)->item;
if (ord_col->type() == Item::INT_ITEM)
for (uint32_t i = 0; i < select_ctn; i++)
{
Item_int* id = (Item_int*)ord_col;
if (id->val_int() > (int)selCols.size())
{
gwi.fatalParseError = true;
if (ac)
delete ac;
return NULL;
}
rc = selCols[id->val_int() - 1]->clone();
rc->orderPos(id->val_int() - 1);
}
else
{
rc = buildReturnedColumn(ord_col, gwi, gwi.fatalParseError);
rc = buildReturnedColumn(sfitempp[i], gwi, gwi.fatalParseError);
if (!rc || gwi.fatalParseError)
{
if (ac)
delete ac;
if (ac)
delete ac;
return NULL;
}
selCols.push_back(SRCP(rc));
}
// 10.2 TODO: direction is now a tri-state flag
rc->asc((*order_item)->direction == ORDER::ORDER_ASC ? true : false);
orderCols.push_back(SRCP(rc));
}
ORDER** order_item, **end;
rowCol->columnVec(selCols);
(dynamic_cast<GroupConcatColumn*>(ac))->orderCols(orderCols);
parm.reset(rowCol);
ac->aggParms().push_back(parm);
if (gc->str_separator())
{
string separator;
separator.assign(gc->str_separator()->ptr(), gc->str_separator()->length());
(dynamic_cast<GroupConcatColumn*>(ac))->separator(separator);
}
}
else
{
for (uint32_t i = 0; i < isp->argument_count(); i++)
{
Item* sfitemp = sfitempp[i];
Item::Type sfitype = sfitemp->type();
switch (sfitype)
for (order_item = gc->get_order(),
end = order_item + gc->order_field(); order_item < end;
order_item++)
{
case Item::FIELD_ITEM:
{
Item_field* ifp = reinterpret_cast<Item_field*>(sfitemp);
SimpleColumn* sc = buildSimpleColumn(ifp, gwi);
Item* ord_col = *(*order_item)->item;
if (!sc)
if (ord_col->type() == Item::INT_ITEM)
{
Item_int* id = (Item_int*)ord_col;
if (id->val_int() > (int)selCols.size())
{
gwi.fatalParseError = true;
if (ac)
delete ac;
return NULL;
}
rc = selCols[id->val_int() - 1]->clone();
rc->orderPos(id->val_int() - 1);
}
else
{
rc = buildReturnedColumn(ord_col, gwi, gwi.fatalParseError);
if (!rc || gwi.fatalParseError)
{
if (ac)
delete ac;
return NULL;
}
}
// 10.2 TODO: direction is now a tri-state flag
rc->asc((*order_item)->direction == ORDER::ORDER_ASC ? true : false);
orderCols.push_back(SRCP(rc));
}
rowCol->columnVec(selCols);
(dynamic_cast<GroupConcatColumn*>(ac))->orderCols(orderCols);
parm.reset(rowCol);
ac->aggParms().push_back(parm);
if (gc->str_separator())
{
string separator;
separator.assign(gc->str_separator()->ptr(), gc->str_separator()->length());
(dynamic_cast<GroupConcatColumn*>(ac))->separator(separator);
}
}
else
{
for (uint32_t i = 0; i < isp->argument_count(); i++)
{
Item* sfitemp = sfitempp[i];
Item::Type sfitype = sfitemp->type();
switch (sfitype)
{
case Item::FIELD_ITEM:
{
Item_field* ifp = reinterpret_cast<Item_field*>(sfitemp);
SimpleColumn* sc = buildSimpleColumn(ifp, gwi);
if (!sc)
{
gwi.fatalParseError = true;
break;
}
parm.reset(sc);
gwi.columnMap.insert(CalpontSelectExecutionPlan::ColumnMap::value_type(string(ifp->field_name), parm));
TABLE_LIST* tmp = (ifp->cached_table ? ifp->cached_table : 0);
gwi.tableMap[make_aliastable(sc->schemaName(), sc->tableName(), sc->tableAlias(), sc->isInfiniDB())] = make_pair(1, tmp);
break;
}
parm.reset(sc);
gwi.columnMap.insert(CalpontSelectExecutionPlan::ColumnMap::value_type(string(ifp->field_name), parm));
TABLE_LIST* tmp = (ifp->cached_table ? ifp->cached_table : 0);
gwi.tableMap[make_aliastable(sc->schemaName(), sc->tableName(), sc->tableAlias(), sc->isInfiniDB())] = make_pair(1, tmp);
break;
}
case Item::INT_ITEM:
case Item::STRING_ITEM:
case Item::REAL_ITEM:
case Item::DECIMAL_ITEM:
{
// treat as count(*)
if (ac->aggOp() == AggregateColumn::COUNT)
ac->aggOp(AggregateColumn::COUNT_ASTERISK);
case Item::INT_ITEM:
case Item::STRING_ITEM:
case Item::REAL_ITEM:
case Item::DECIMAL_ITEM:
{
// treat as count(*)
if (ac->aggOp() == AggregateColumn::COUNT)
ac->aggOp(AggregateColumn::COUNT_ASTERISK);
parm.reset(buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError));
ac->constCol(parm);
break;
}
case Item::NULL_ITEM:
{
parm.reset(new ConstantColumn("", ConstantColumn::NULLDATA));
ac->constCol(SRCP(buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError)));
break;
}
case Item::FUNC_ITEM:
{
Item_func* ifp = (Item_func*)sfitemp;
ReturnedColumn* rc = 0;
// check count(1+1) case
vector <Item_field*> tmpVec;
uint16_t parseInfo = 0;
parse_item(ifp, tmpVec, gwi.fatalParseError, parseInfo);
if (parseInfo & SUB_BIT)
{
gwi.fatalParseError = true;
break;
}
else if (!gwi.fatalParseError &&
!(parseInfo & AGG_BIT) &&
!(parseInfo & AF_BIT) &&
tmpVec.size() == 0)
case Item::NULL_ITEM:
{
rc = buildFunctionColumn(ifp, gwi, gwi.fatalParseError);
FunctionColumn* fc = dynamic_cast<FunctionColumn*>(rc);
parm.reset(new ConstantColumn("", ConstantColumn::NULLDATA));
ac->constCol(SRCP(buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError)));
break;
}
if ((fc && fc->functionParms().empty()) || !fc)
case Item::FUNC_ITEM:
{
Item_func* ifp = (Item_func*)sfitemp;
ReturnedColumn* rc = 0;
// check count(1+1) case
vector <Item_field*> tmpVec;
uint16_t parseInfo = 0;
parse_item(ifp, tmpVec, gwi.fatalParseError, parseInfo);
if (parseInfo & SUB_BIT)
{
//ac->aggOp(AggregateColumn::COUNT_ASTERISK);
ReturnedColumn* rc = buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError);
gwi.fatalParseError = true;
break;
}
else if (!gwi.fatalParseError &&
!(parseInfo & AGG_BIT) &&
!(parseInfo & AF_BIT) &&
tmpVec.size() == 0)
{
rc = buildFunctionColumn(ifp, gwi, gwi.fatalParseError);
FunctionColumn* fc = dynamic_cast<FunctionColumn*>(rc);
if (dynamic_cast<ConstantColumn*>(rc))
if ((fc && fc->functionParms().empty()) || !fc)
{
//@bug5229. handle constant function on aggregate argument
ac->constCol(SRCP(rc));
break;
//ac->aggOp(AggregateColumn::COUNT_ASTERISK);
ReturnedColumn* rc = buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError);
if (dynamic_cast<ConstantColumn*>(rc))
{
//@bug5229. handle constant function on aggregate argument
ac->constCol(SRCP(rc));
break;
}
}
}
// MySQL carelessly allows correlated aggregate functions in the WHERE clause.
// Here is the workaround to deal with that inconsistency.
// e.g., SELECT (SELECT t.c FROM t1 AS t WHERE t.b=MAX(t1.b + 0)) FROM t1;
ClauseType clauseType = gwi.clauseType;
if (gwi.clauseType == WHERE)
gwi.clauseType = HAVING;
// @bug 3603. for cases like max(rand()). try to build function first.
if (!rc)
rc = buildFunctionColumn(ifp, gwi, gwi.fatalParseError);
parm.reset(rc);
gwi.clauseType = clauseType;
if (gwi.fatalParseError)
break;
break;
}
case Item::REF_ITEM:
{
ReturnedColumn* rc = buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError);
if (rc)
{
parm.reset(rc);
break;
}
}
// MySQL carelessly allows correlated aggregate functions in the WHERE clause.
// Here is the workaround to deal with that inconsistency.
// e.g., SELECT (SELECT t.c FROM t1 AS t WHERE t.b=MAX(t1.b + 0)) FROM t1;
ClauseType clauseType = gwi.clauseType;
if (gwi.clauseType == WHERE)
gwi.clauseType = HAVING;
// @bug 3603. for cases like max(rand()). try to build function first.
if (!rc)
rc = buildFunctionColumn(ifp, gwi, gwi.fatalParseError);
parm.reset(rc);
gwi.clauseType = clauseType;
if (gwi.fatalParseError)
break;
break;
}
case Item::REF_ITEM:
{
ReturnedColumn* rc = buildReturnedColumn(sfitemp, gwi, gwi.fatalParseError);
if (rc)
default:
{
parm.reset(rc);
break;
gwi.fatalParseError = true;
//gwi.parseErrorText = "Non-supported Item in Aggregate function";
}
}
default:
if (gwi.fatalParseError)
{
gwi.fatalParseError = true;
//gwi.parseErrorText = "Non-supported Item in Aggregate function";
}
}
if (gwi.parseErrorText.empty())
{
Message::Args args;
if (gwi.fatalParseError)
{
if (gwi.parseErrorText.empty())
{
Message::Args args;
if (item->name)
args.add(item->name);
else
args.add("");
if (item->name)
args.add(item->name);
else
args.add("");
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORT_AGG_ARGS, args);
}
gwi.parseErrorText = IDBErrorInfo::instance()->errorMsg(ERR_NON_SUPPORT_AGG_ARGS, args);
if (ac)
delete ac;
return NULL;
}
if (ac)
delete ac;
return NULL;
}
if (parm)
{
// MCOL-1201 multi-argument aggregate
ac->aggParms().push_back(parm);
if (parm)
{
// MCOL-1201 multi-argument aggregate
ac->aggParms().push_back(parm);
}
}
}
}
// Get result type
// Modified for MCOL-1201 multi-argument aggregate
if (ac->aggParms().size() > 0)
{
{
// These are all one parm functions, so we can safely
// use the first parm for result type.
parm = ac->aggParms()[0];
if (isp->sum_func() == Item_sum::AVG_FUNC ||
isp->sum_func() == Item_sum::AVG_DISTINCT_FUNC)
{
CalpontSystemCatalog::ColType ct = parm->resultType();
switch (ct.colDataType)
if (isp->sum_func() == Item_sum::AVG_FUNC ||
isp->sum_func() == Item_sum::AVG_DISTINCT_FUNC)
{
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::MEDINT:
case CalpontSystemCatalog::INT:
case CalpontSystemCatalog::BIGINT:
case CalpontSystemCatalog::DECIMAL:
case CalpontSystemCatalog::UDECIMAL:
case CalpontSystemCatalog::UTINYINT:
case CalpontSystemCatalog::USMALLINT:
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
case CalpontSystemCatalog::UBIGINT:
ct.colDataType = CalpontSystemCatalog::DECIMAL;
ct.colWidth = 8;
ct.scale += 4;
break;
CalpontSystemCatalog::ColType ct = parm->resultType();
switch (ct.colDataType)
{
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::MEDINT:
case CalpontSystemCatalog::INT:
case CalpontSystemCatalog::BIGINT:
case CalpontSystemCatalog::DECIMAL:
case CalpontSystemCatalog::UDECIMAL:
case CalpontSystemCatalog::UTINYINT:
case CalpontSystemCatalog::USMALLINT:
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
case CalpontSystemCatalog::UBIGINT:
ct.colDataType = CalpontSystemCatalog::DECIMAL;
ct.colWidth = 8;
ct.scale += 4;
break;
#if PROMOTE_FLOAT_TO_DOUBLE_ON_SUM
case CalpontSystemCatalog::FLOAT:
case CalpontSystemCatalog::UFLOAT:
case CalpontSystemCatalog::DOUBLE:
case CalpontSystemCatalog::UDOUBLE:
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
break;
case CalpontSystemCatalog::FLOAT:
case CalpontSystemCatalog::UFLOAT:
case CalpontSystemCatalog::DOUBLE:
case CalpontSystemCatalog::UDOUBLE:
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
break;
#endif
default:
break;
default:
break;
}
ac->resultType(ct);
}
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::COUNT_FUNC ||
isp->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
{
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::BIGINT;
ct.colWidth = 8;
ct.scale = parm->resultType().scale;
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::SUM_FUNC ||
isp->sum_func() == Item_sum::SUM_DISTINCT_FUNC)
{
CalpontSystemCatalog::ColType ct = parm->resultType();
switch (ct.colDataType)
else if (isp->sum_func() == Item_sum::COUNT_FUNC ||
isp->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
{
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::MEDINT:
case CalpontSystemCatalog::INT:
case CalpontSystemCatalog::BIGINT:
ct.colDataType = CalpontSystemCatalog::BIGINT;
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::BIGINT;
ct.colWidth = 8;
ct.scale = parm->resultType().scale;
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::SUM_FUNC ||
isp->sum_func() == Item_sum::SUM_DISTINCT_FUNC)
{
CalpontSystemCatalog::ColType ct = parm->resultType();
// no break, let fall through
switch (ct.colDataType)
{
case CalpontSystemCatalog::TINYINT:
case CalpontSystemCatalog::SMALLINT:
case CalpontSystemCatalog::MEDINT:
case CalpontSystemCatalog::INT:
case CalpontSystemCatalog::BIGINT:
ct.colDataType = CalpontSystemCatalog::BIGINT;
case CalpontSystemCatalog::DECIMAL:
case CalpontSystemCatalog::UDECIMAL:
ct.colWidth = 8;
break;
// no break, let fall through
case CalpontSystemCatalog::UTINYINT:
case CalpontSystemCatalog::USMALLINT:
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
case CalpontSystemCatalog::UBIGINT:
ct.colDataType = CalpontSystemCatalog::UBIGINT;
ct.colWidth = 8;
break;
case CalpontSystemCatalog::DECIMAL:
case CalpontSystemCatalog::UDECIMAL:
ct.colWidth = 8;
break;
case CalpontSystemCatalog::UTINYINT:
case CalpontSystemCatalog::USMALLINT:
case CalpontSystemCatalog::UMEDINT:
case CalpontSystemCatalog::UINT:
case CalpontSystemCatalog::UBIGINT:
ct.colDataType = CalpontSystemCatalog::UBIGINT;
ct.colWidth = 8;
break;
#if PROMOTE_FLOAT_TO_DOUBLE_ON_SUM
case CalpontSystemCatalog::FLOAT:
case CalpontSystemCatalog::UFLOAT:
case CalpontSystemCatalog::DOUBLE:
case CalpontSystemCatalog::UDOUBLE:
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
break;
case CalpontSystemCatalog::FLOAT:
case CalpontSystemCatalog::UFLOAT:
case CalpontSystemCatalog::DOUBLE:
case CalpontSystemCatalog::UDOUBLE:
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
break;
#endif
default:
break;
}
default:
break;
}
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::STD_FUNC ||
isp->sum_func() == Item_sum::VARIANCE_FUNC)
{
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
ct.scale = 0;
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::SUM_BIT_FUNC)
{
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::BIGINT;
ct.colWidth = 8;
ct.scale = 0;
ct.precision = -16; // borrowed to indicate skip null value check on connector
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::GROUP_CONCAT_FUNC)
{
//Item_func_group_concat* gc = (Item_func_group_concat*)isp;
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::VARCHAR;
ct.colWidth = isp->max_length;
ct.precision = 0;
ac->resultType(ct);
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::STD_FUNC ||
isp->sum_func() == Item_sum::VARIANCE_FUNC)
{
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::DOUBLE;
ct.colWidth = 8;
ct.scale = 0;
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::SUM_BIT_FUNC)
{
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::BIGINT;
ct.colWidth = 8;
ct.scale = 0;
ct.precision = -16; // borrowed to indicate skip null value check on connector
ac->resultType(ct);
}
else if (isp->sum_func() == Item_sum::GROUP_CONCAT_FUNC)
{
//Item_func_group_concat* gc = (Item_func_group_concat*)isp;
CalpontSystemCatalog::ColType ct;
ct.colDataType = CalpontSystemCatalog::VARCHAR;
ct.colWidth = isp->max_length;
ct.precision = 0;
ac->resultType(ct);
}
else
{
// UDAF result type will be set below.
ac->resultType(parm->resultType());
}
}
else
{
// UDAF result type will be set below.
ac->resultType(parm->resultType());
ac->resultType(colType_MysqlToIDB(isp));
}
}
else
{
ac->resultType(colType_MysqlToIDB(isp));
}
// adjust decimal result type according to internalDecimalScale
if (gwi.internalDecimalScale >= 0 && ac->resultType().colDataType == CalpontSystemCatalog::DECIMAL)
{
CalpontSystemCatalog::ColType ct = ac->resultType();
ct.scale = gwi.internalDecimalScale;
ac->resultType(ct);
}
// check for same aggregate on the select list
ac->expressionId(ci->expressionId++);
if (gwi.clauseType != SELECT)
{
for (uint32_t i = 0; i < gwi.returnedCols.size(); i++)
// adjust decimal result type according to internalDecimalScale
if (gwi.internalDecimalScale >= 0 && ac->resultType().colDataType == CalpontSystemCatalog::DECIMAL)
{
if (*ac == gwi.returnedCols[i].get())
ac->expressionId(gwi.returnedCols[i]->expressionId());
CalpontSystemCatalog::ColType ct = ac->resultType();
ct.scale = gwi.internalDecimalScale;
ac->resultType(ct);
}
}
// @bug5977 @note Temporary fix to avoid mysqld crash. The permanent fix will
// be applied in ExeMgr. When the ExeMgr fix is available, this checking
// will be taken out.
// check for same aggregate on the select list
ac->expressionId(ci->expressionId++);
if (gwi.clauseType != SELECT)
{
for (uint32_t i = 0; i < gwi.returnedCols.size(); i++)
{
if (*ac == gwi.returnedCols[i].get())
ac->expressionId(gwi.returnedCols[i]->expressionId());
}
}
// @bug5977 @note Temporary fix to avoid mysqld crash. The permanent fix will
// be applied in ExeMgr. When the ExeMgr fix is available, this checking
// will be taken out.
if (isp->sum_func() != Item_sum::UDF_SUM_FUNC)
{
if (ac->constCol() && gwi.tbList.empty() && gwi.derivedTbList.empty())
{
gwi.fatalParseError = true;
gwi.parseErrorText = "No project column found for aggregate function";
if (ac->constCol() && gwi.tbList.empty() && gwi.derivedTbList.empty())
{
gwi.fatalParseError = true;
gwi.parseErrorText = "No project column found for aggregate function";
if (ac)
delete ac;
return NULL;
}
else if (ac->constCol())
{
gwi.count_asterisk_list.push_back(ac);
}
return NULL;
}
else if (ac->constCol())
{
gwi.count_asterisk_list.push_back(ac);
}
}
// For UDAF, populate the context and call the UDAF init() function.
// For UDAF, populate the context and call the UDAF init() function.
// The return type is (should be) set in context by init().
if (isp->sum_func() == Item_sum::UDF_SUM_FUNC)
{
UDAFColumn* udafc = dynamic_cast<UDAFColumn*>(ac);
if (udafc)
if (isp->sum_func() == Item_sum::UDF_SUM_FUNC)
{
mcsv1Context& context = udafc->getContext();
context.setName(isp->func_name());
UDAFColumn* udafc = dynamic_cast<UDAFColumn*>(ac);
// Set up the return type defaults for the call to init()
context.setResultType(udafc->resultType().colDataType);
context.setColWidth(udafc->resultType().colWidth);
context.setScale(udafc->resultType().scale);
context.setPrecision(udafc->resultType().precision);
if (udafc)
{
mcsv1Context& context = udafc->getContext();
context.setName(isp->func_name());
// Set up the return type defaults for the call to init()
context.setResultType(udafc->resultType().colDataType);
context.setColWidth(udafc->resultType().colWidth);
context.setScale(udafc->resultType().scale);
context.setPrecision(udafc->resultType().precision);
context.setParamCount(udafc->aggParms().size());
ColumnDatum colType;
ColumnDatum colTypes[udafc->aggParms().size()];
// Build the column type vector.
// Modified for MCOL-1201 multi-argument aggregate
for (uint32_t i = 0; i < udafc->aggParms().size(); ++i)
{
const execplan::CalpontSystemCatalog::ColType& resultType
const execplan::CalpontSystemCatalog::ColType& resultType
= udafc->aggParms()[i]->resultType();
colType.dataType = resultType.colDataType;
colType.precision = resultType.precision;
@@ -4536,65 +4551,78 @@ ReturnedColumn* buildAggregateColumn(Item* item, gp_walk_info& gwi)
colTypes[i] = colType;
}
// Call the user supplied init()
// Call the user supplied init()
mcsv1sdk::mcsv1_UDAF* udaf = context.getFunction();
if (!udaf)
{
gwi.fatalParseError = true;
gwi.parseErrorText = "Aggregate Function " + context.getName() + " doesn't exist in the ColumnStore engine";
if (ac)
delete ac;
return NULL;
}
if (udaf->init(&context, colTypes) == mcsv1_UDAF::ERROR)
{
gwi.fatalParseError = true;
gwi.parseErrorText = udafc->getContext().getErrorMessage();
if (udaf->init(&context, colTypes) == mcsv1_UDAF::ERROR)
{
gwi.fatalParseError = true;
gwi.parseErrorText = udafc->getContext().getErrorMessage();
if (ac)
delete ac;
return NULL;
}
return NULL;
}
// UDAF_OVER_REQUIRED means that this function is for Window
// Function only. Reject it here in aggregate land.
if (udafc->getContext().getRunFlag(UDAF_OVER_REQUIRED))
{
gwi.fatalParseError = true;
gwi.parseErrorText =
logging::IDBErrorInfo::instance()->errorMsg(logging::ERR_WINDOW_FUNC_ONLY,
context.getName());
if (udafc->getContext().getRunFlag(UDAF_OVER_REQUIRED))
{
gwi.fatalParseError = true;
gwi.parseErrorText =
logging::IDBErrorInfo::instance()->errorMsg(logging::ERR_WINDOW_FUNC_ONLY,
context.getName());
if (ac)
delete ac;
return NULL;
}
// Set the return type as set in init()
CalpontSystemCatalog::ColType ct;
ct.colDataType = context.getResultType();
ct.colWidth = context.getColWidth();
ct.scale = context.getScale();
ct.precision = context.getPrecision();
udafc->resultType(ct);
return NULL;
}
// Set the return type as set in init()
CalpontSystemCatalog::ColType ct;
ct.colDataType = context.getResultType();
ct.colWidth = context.getColWidth();
ct.scale = context.getScale();
ct.precision = context.getPrecision();
udafc->resultType(ct);
}
}
}
}
catch (std::logic_error e)
{
gwi.fatalParseError = true;
gwi.parseErrorText = "error building Aggregate Function: ";
gwi.parseErrorText += e.what();
if (ac)
delete ac;
return NULL;
}
catch (...)
{
gwi.fatalParseError = true;
gwi.parseErrorText = "error building Aggregate Function: Unspecified exception";
if (ac)
delete ac;
return NULL;
}
return ac;
}
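
For reference, the re-indented buildAggregateColumn() body above mainly wraps the result-type logic for MCOL-1201 multi-argument aggregates while keeping the special GROUP_CONCAT handling (order columns and separator) and the AVG/SUM/COUNT type promotions. A hedged SQL sketch of statements that exercise those paths; the table t1, its columns, and the UDAF name my_udaf are hypothetical:

SELECT GROUP_CONCAT(name ORDER BY id DESC SEPARATOR '; ') FROM t1;  -- GROUP_CONCAT: orderCols plus a custom separator
SELECT AVG(qty) FROM t1;                                            -- integer AVG promoted to DECIMAL with scale + 4
SELECT my_udaf(col1, col2) FROM t1;                                 -- MCOL-1201: user-defined aggregate with several parameters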
@@ -7915,6 +7943,7 @@ int getSelectPlan(gp_walk_info& gwi, SELECT_LEX& select_lex, SCSEP& csep, bool i
setError(gwi.thd, ER_INTERNAL_ERROR, gwi.parseErrorText, gwi);
return ER_CHECK_NOT_IMPLEMENTED;
}
// Replace the last (presumably constant) object with minSc
if ((*coliter)->aggParms().empty())
{

View File

@@ -782,7 +782,7 @@ int fetchNextRow(uchar* buf, cal_table_info& ti, cal_connection_info* ci, bool h
//double double_val = *(double*)(&value);
//f2->store(double_val);
if ((f2->decimals() == DECIMAL_NOT_SPECIFIED && row.getScale(s) > 0)
|| f2->decimals() < row.getScale(s))
|| f2->decimals() < row.getScale(s))
{
f2->dec = row.getScale(s);
}
@@ -5278,6 +5278,7 @@ int ha_calpont_impl_group_by_init(ha_calpont_group_by_handler* group_hand, TABLE
execplan::CalpontSelectExecutionPlan::ColumnMap::iterator colMapIter;
execplan::CalpontSelectExecutionPlan::ColumnMap::iterator condColMapIter;
execplan::ParseTree* ptIt;
for (TABLE_LIST* tl = gi.groupByTables; tl; tl = tl->next_local)
{
mapiter = ci->tableMap.find(tl->table);

View File

@@ -384,7 +384,7 @@ ReturnedColumn* buildWindowFunctionColumn(Item* item, gp_walk_info& gwi, bool& n
// Modified for MCOL-1201 multi-argument aggregate
for (size_t i = 0; i < funcParms.size(); ++i)
{
const execplan::CalpontSystemCatalog::ColType& resultType
const execplan::CalpontSystemCatalog::ColType& resultType
= funcParms[i]->resultType();
colType.dataType = resultType.colDataType;
colType.precision = resultType.precision;

View File

@@ -56,10 +56,62 @@ ST_FIELD_INFO is_columnstore_columns_fields[] =
};
static void get_cond_item(Item_func* item, String** table, String** db)
{
char tmp_char[MAX_FIELD_WIDTH];
Item_field* item_field = (Item_field*) item->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "table_name") == 0)
{
String str_buf(tmp_char, sizeof(tmp_char), system_charset_info);
*table = item->arguments()[1]->val_str(&str_buf);
return;
}
else if (strcasecmp(item_field->field_name, "table_schema") == 0)
{
String str_buf(tmp_char, sizeof(tmp_char), system_charset_info);
*db = item->arguments()[1]->val_str(&str_buf);
return;
}
}
static void get_cond_items(COND* cond, String** table, String** db)
{
if (cond->type() == Item::FUNC_ITEM)
{
Item_func* fitem = (Item_func*) cond;
if (fitem->arguments()[0]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[1]->const_item())
{
get_cond_item(fitem, table, db);
}
}
else if ((cond->type() == Item::COND_ITEM) && (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC))
{
List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
Item* item;
while ((item = li++))
{
if (item->type() == Item::FUNC_ITEM)
{
get_cond_item((Item_func*)item, table, db);
}
else
{
get_cond_items(item, table, db);
}
}
}
}
static int is_columnstore_columns_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
CHARSET_INFO* cs = system_charset_info;
TABLE* table = tables->table;
String* table_name = NULL;
String* db_name = NULL;
boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr =
execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id));
@@ -69,9 +121,30 @@ static int is_columnstore_columns_fill(THD* thd, TABLE_LIST* tables, COND* cond)
systemCatalogPtr->identity(execplan::CalpontSystemCatalog::FE);
if (cond)
{
get_cond_items(cond, &table_name, &db_name);
}
for (std::vector<std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> >::const_iterator it = catalog_tables.begin();
it != catalog_tables.end(); ++it)
{
if (db_name)
{
if ((*it).second.schema.compare(db_name->ptr()) != 0)
{
continue;
}
}
if (table_name)
{
if ((*it).second.table.compare(table_name->ptr()) != 0)
{
continue;
}
}
execplan::CalpontSystemCatalog::RIDList column_rid_list;
// Note a table may get dropped as you iterate over the list of tables.
@@ -184,8 +257,6 @@ static int is_columnstore_columns_fill(THD* thd, TABLE_LIST* tables, COND* cond)
}
}
return 0;
}
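
The get_cond_item()/get_cond_items() helpers added above push simple WHERE conditions on table_schema and table_name down into the INFORMATION_SCHEMA.COLUMNSTORE_COLUMNS fill, so only matching catalog entries are expanded. A hedged sketch of the condition shapes they recognize (schema and table names are hypothetical):

SELECT * FROM information_schema.columnstore_columns WHERE table_schema = 'test';
SELECT * FROM information_schema.columnstore_columns WHERE table_schema = 'test' AND table_name = 'orders';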

View File

@@ -52,14 +52,142 @@ ST_FIELD_INFO is_columnstore_extents_fields[] =
{0, 0, MYSQL_TYPE_NULL, 0, 0, 0, 0}
};
static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* thd)
{
CHARSET_INFO* cs = system_charset_info;
TABLE* table = tables->table;
std::vector<struct BRM::EMEntry> entries;
std::vector<struct BRM::EMEntry>::iterator iter;
std::vector<struct BRM::EMEntry>::iterator end;
emp->getExtents(oid, entries, false, false, true);
if (entries.size() == 0)
return 0;
iter = entries.begin();
end = entries.end();
while (iter != end)
{
table->field[0]->store(oid);
if (iter->colWid > 0)
{
table->field[1]->store("Column", strlen("Column"), cs);
if (iter->partition.cprange.lo_val == std::numeric_limits<int64_t>::max() ||
iter->partition.cprange.lo_val <= (std::numeric_limits<int64_t>::min() + 2))
{
table->field[4]->set_null();
}
else
{
table->field[4]->set_notnull();
table->field[4]->store(iter->partition.cprange.lo_val);
}
if (iter->partition.cprange.hi_val == std::numeric_limits<int64_t>::max() ||
iter->partition.cprange.hi_val <= (std::numeric_limits<int64_t>::min() + 2))
{
table->field[5]->set_null();
}
else
{
table->field[5]->set_notnull();
table->field[5]->store(iter->partition.cprange.hi_val);
}
table->field[6]->store(iter->colWid);
}
else
{
table->field[1]->store("Dictionary", strlen("Dictionary"), cs);
table->field[4]->set_null();
table->field[5]->set_null();
table->field[6]->store(8192);
}
table->field[2]->store(iter->range.start);
table->field[3]->store(iter->range.start + (iter->range.size * 1024) - 1);
table->field[7]->store(iter->dbRoot);
table->field[8]->store(iter->partitionNum);
table->field[9]->store(iter->segmentNum);
table->field[10]->store(iter->blockOffset);
table->field[11]->store(iter->range.size * 1024);
table->field[12]->store(iter->HWM);
switch (iter->partition.cprange.isValid)
{
case 0:
table->field[13]->store("Invalid", strlen("Invalid"), cs);
break;
case 1:
table->field[13]->store("Updating", strlen("Updating"), cs);
break;
case 2:
table->field[13]->store("Valid", strlen("Valid"), cs);
break;
default:
table->field[13]->store("Unknown", strlen("Unknown"), cs);
break;
}
switch (iter->status)
{
case BRM::EXTENTAVAILABLE:
table->field[14]->store("Available", strlen("Available"), cs);
break;
case BRM::EXTENTUNAVAILABLE:
table->field[14]->store("Unavailable", strlen("Unavailable"), cs);
break;
case BRM::EXTENTOUTOFSERVICE:
table->field[14]->store("Out of service", strlen("Out of service"), cs);
break;
default:
table->field[14]->store("Unknown", strlen("Unknown"), cs);
}
// MCOL-1016: with multiple segments the HWM is set to 0 on the lower
// segments, and we don't want these to show as 8KB. The downside is
// that if the column has less than 1 block it will show as 0 bytes.
// We have no lookahead without it getting messy, so this is the
// best compromise.
if (iter->HWM == 0)
{
table->field[15]->store(0);
}
else
{
table->field[15]->store((iter->HWM + 1) * 8192);
}
if (schema_table_store_record(thd, table))
{
delete emp;
return 1;
}
iter++;
}
return 0;
}
static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
BRM::OID_t cond_oid = 0;
TABLE* table = tables->table;
BRM::DBRM* emp = new BRM::DBRM();
if (!emp || !emp->isDBRMReady())
@@ -67,130 +195,83 @@ static int is_columnstore_extents_fill(THD* thd, TABLE_LIST* tables, COND* cond)
return 1;
}
if (cond && cond->type() == Item::FUNC_ITEM)
{
Item_func* fitem = (Item_func*) cond;
if ((fitem->functype() == Item_func::EQ_FUNC) && (fitem->argument_count() == 2))
{
if (fitem->arguments()[0]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[1]->const_item())
{
// WHERE object_id = value
Item_field* item_field = (Item_field*) fitem->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
cond_oid = fitem->arguments()[1]->val_int();
return generate_result(cond_oid, emp, table, thd);
}
}
else if (fitem->arguments()[1]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[0]->const_item())
{
// WHERE value = object_id
Item_field* item_field = (Item_field*) fitem->arguments()[1]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
cond_oid = fitem->arguments()[0]->val_int();
return generate_result(cond_oid, emp, table, thd);
}
}
}
else if (fitem->functype() == Item_func::IN_FUNC)
{
// WHERE object_id in (value1, value2)
Item_field* item_field = (Item_field*) fitem->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
for (unsigned int i = 1; i < fitem->argument_count(); i++)
{
cond_oid = fitem->arguments()[i]->val_int();
int result = generate_result(cond_oid, emp, table, thd);
if (result)
return 1;
}
}
}
else if (fitem->functype() == Item_func::UNKNOWN_FUNC &&
strcasecmp(fitem->func_name(), "find_in_set") == 0)
{
// WHERE FIND_IN_SET(object_id, values)
String* tmp_var = fitem->arguments()[1]->val_str();
std::stringstream ss(tmp_var->ptr());
while (ss >> cond_oid)
{
int ret = generate_result(cond_oid, emp, table, thd);
if (ret)
return 1;
if (ss.peek() == ',')
ss.ignore();
}
}
}
execplan::ObjectIDManager oidm;
BRM::OID_t MaxOID = oidm.size();
for (BRM::OID_t oid = 3000; oid <= MaxOID; oid++)
{
emp->getExtents(oid, entries, false, false, true);
int result = generate_result(oid, emp, table, thd);
if (entries.size() == 0)
continue;
iter = entries.begin();
end = entries.end();
while (iter != end)
{
table->field[0]->store(oid);
if (iter->colWid > 0)
{
table->field[1]->store("Column", strlen("Column"), cs);
if (iter->partition.cprange.lo_val == std::numeric_limits<int64_t>::max() ||
iter->partition.cprange.lo_val <= (std::numeric_limits<int64_t>::min() + 2))
{
table->field[4]->set_null();
}
else
{
table->field[4]->set_notnull();
table->field[4]->store(iter->partition.cprange.lo_val);
}
if (iter->partition.cprange.hi_val == std::numeric_limits<int64_t>::max() ||
iter->partition.cprange.hi_val <= (std::numeric_limits<int64_t>::min() + 2))
{
table->field[5]->set_null();
}
else
{
table->field[5]->set_notnull();
table->field[5]->store(iter->partition.cprange.hi_val);
}
table->field[6]->store(iter->colWid);
}
else
{
table->field[1]->store("Dictionary", strlen("Dictionary"), cs);
table->field[4]->set_null();
table->field[5]->set_null();
table->field[6]->store(8192);
}
table->field[2]->store(iter->range.start);
table->field[3]->store(iter->range.start + (iter->range.size * 1024) - 1);
table->field[7]->store(iter->dbRoot);
table->field[8]->store(iter->partitionNum);
table->field[9]->store(iter->segmentNum);
table->field[10]->store(iter->blockOffset);
table->field[11]->store(iter->range.size * 1024);
table->field[12]->store(iter->HWM);
switch (iter->partition.cprange.isValid)
{
case 0:
table->field[13]->store("Invalid", strlen("Invalid"), cs);
break;
case 1:
table->field[13]->store("Updating", strlen("Updating"), cs);
break;
case 2:
table->field[13]->store("Valid", strlen("Valid"), cs);
break;
default:
table->field[13]->store("Unknown", strlen("Unknown"), cs);
break;
}
switch (iter->status)
{
case BRM::EXTENTAVAILABLE:
table->field[14]->store("Available", strlen("Available"), cs);
break;
case BRM::EXTENTUNAVAILABLE:
table->field[14]->store("Unavailable", strlen("Unavailable"), cs);
break;
case BRM::EXTENTOUTOFSERVICE:
table->field[14]->store("Out of service", strlen("Out of service"), cs);
break;
default:
table->field[14]->store("Unknown", strlen("Unknown"), cs);
}
// MCOL-1016: with multiple segments the HWM is set to 0 on the lower
// segments, and we don't want these to show as 8KB. The downside is
// that if the column has less than 1 block it will show as 0 bytes.
// We have no lookahead without it getting messy, so this is the
// best compromise.
if (iter->HWM == 0)
{
table->field[15]->store(0);
}
else
{
table->field[15]->store((iter->HWM + 1) * 8192);
}
if (schema_table_store_record(thd, table))
{
delete emp;
return 1;
}
iter++;
}
if (result)
return 1;
}
delete emp;
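
In the rewritten is_columnstore_extents_fill() above, the per-OID work moves into generate_result(), and an object_id restriction in the WHERE clause is served directly instead of scanning every OID from 3000 to MaxOID. A hedged sketch of the predicate forms the new code recognizes (the OID values are hypothetical):

SELECT * FROM information_schema.columnstore_extents WHERE object_id = 3001;
SELECT * FROM information_schema.columnstore_extents WHERE 3001 = object_id;
SELECT * FROM information_schema.columnstore_extents WHERE object_id IN (3001, 3002);
SELECT * FROM information_schema.columnstore_extents WHERE FIND_IN_SET(object_id, '3001,3002');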

View File

@@ -84,12 +84,10 @@ static bool get_file_sizes(messageqcpp::MessageQueueClient* msgQueueClient, cons
}
}
static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
static int generate_result(BRM::OID_t oid, BRM::DBRM* emp, TABLE* table, THD* thd)
{
BRM::DBRM* emp = new BRM::DBRM();
std::vector<struct BRM::EMEntry> entries;
CHARSET_INFO* cs = system_charset_info;
TABLE* table = tables->table;
char oidDirName[WriteEngine::FILE_NAME_SIZE];
char fullFileName[WriteEngine::FILE_NAME_SIZE];
@@ -103,99 +101,184 @@ static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
oam::Oam oam_instance;
int pmId = 0;
emp->getExtents(oid, entries, false, false, true);
if (entries.size() == 0)
return 0;
std::vector<struct BRM::EMEntry>::const_iterator iter = entries.begin();
while ( iter != entries.end() ) //organize extents into files
{
// Don't include files more than once at different block offsets
if (iter->blockOffset > 0)
{
iter++;
return 0;
}
try
{
oam_instance.getDbrootPmConfig(iter->dbRoot, pmId);
}
catch (std::runtime_error)
{
// MCOL-1116: If we are here a DBRoot is offline/missing
iter++;
return 0;
}
table->field[0]->store(oid);
table->field[1]->store(iter->segmentNum);
table->field[2]->store(iter->partitionNum);
WriteEngine::Convertor::oid2FileName(oid, oidDirName, dbDir, iter->partitionNum, iter->segmentNum);
std::stringstream DbRootName;
DbRootName << "DBRoot" << iter->dbRoot;
std::string DbRootPath = config->getConfig("SystemConfig", DbRootName.str());
fileSize = compressedFileSize = 0;
snprintf(fullFileName, WriteEngine::FILE_NAME_SIZE, "%s/%s", DbRootPath.c_str(), oidDirName);
std::ostringstream oss;
oss << "pm" << pmId << "_WriteEngineServer";
std::string client = oss.str();
msgQueueClient = messageqcpp::MessageQueueClientPool::getInstance(oss.str());
if (!get_file_sizes(msgQueueClient, fullFileName, &fileSize, &compressedFileSize))
{
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
delete emp;
return 1;
}
table->field[3]->store(fullFileName, strlen(fullFileName), cs);
if (fileSize > 0)
{
table->field[4]->set_notnull();
table->field[4]->store(fileSize);
if (compressedFileSize > 0)
{
table->field[5]->set_notnull();
table->field[5]->store(compressedFileSize);
}
else
{
table->field[5]->set_null();
}
}
else
{
table->field[4]->set_null();
table->field[5]->set_null();
}
if (schema_table_store_record(thd, table))
{
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
delete emp;
return 1;
}
iter++;
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
msgQueueClient = NULL;
}
return 0;
}
static int is_columnstore_files_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
BRM::DBRM* emp = new BRM::DBRM();
BRM::OID_t cond_oid = 0;
TABLE* table = tables->table;
if (!emp || !emp->isDBRMReady())
{
return 1;
}
if (cond && cond->type() == Item::FUNC_ITEM)
{
Item_func* fitem = (Item_func*) cond;
if ((fitem->functype() == Item_func::EQ_FUNC) && (fitem->argument_count() == 2))
{
if (fitem->arguments()[0]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[1]->const_item())
{
// WHERE object_id = value
Item_field* item_field = (Item_field*) fitem->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
cond_oid = fitem->arguments()[1]->val_int();
return generate_result(cond_oid, emp, table, thd);
}
}
else if (fitem->arguments()[1]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[0]->const_item())
{
// WHERE value = object_id
Item_field* item_field = (Item_field*) fitem->arguments()[1]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
cond_oid = fitem->arguments()[0]->val_int();
return generate_result(cond_oid, emp, table, thd);
}
}
}
else if (fitem->functype() == Item_func::IN_FUNC)
{
// WHERE object_id in (value1, value2)
Item_field* item_field = (Item_field*) fitem->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "object_id") == 0)
{
for (unsigned int i = 1; i < fitem->argument_count(); i++)
{
cond_oid = fitem->arguments()[i]->val_int();
int result = generate_result(cond_oid, emp, table, thd);
if (result)
return 1;
}
}
}
else if (fitem->functype() == Item_func::UNKNOWN_FUNC &&
strcasecmp(fitem->func_name(), "find_in_set") == 0)
{
// WHERE FIND_IN_SET(object_id, values)
String* tmp_var = fitem->arguments()[1]->val_str();
std::stringstream ss(tmp_var->ptr());
while (ss >> cond_oid)
{
int ret = generate_result(cond_oid, emp, table, thd);
if (ret)
return 1;
if (ss.peek() == ',')
ss.ignore();
}
}
}
execplan::ObjectIDManager oidm;
BRM::OID_t MaxOID = oidm.size();
for (BRM::OID_t oid = 3000; oid <= MaxOID; oid++)
if (!cond_oid)
{
emp->getExtents(oid, entries, false, false, true);
if (entries.size() == 0)
continue;
std::vector<struct BRM::EMEntry>::const_iterator iter = entries.begin();
while ( iter != entries.end() ) //organize extents into files
for (BRM::OID_t oid = 3000; oid <= MaxOID; oid++)
{
// Don't include files more than once at different block offsets
if (iter->blockOffset > 0)
{
iter++;
continue;
}
int result = generate_result(oid, emp, table, thd);
try
{
oam_instance.getDbrootPmConfig(iter->dbRoot, pmId);
}
catch (std::runtime_error)
{
// MCOL-1116: If we are here a DBRoot is offline/missing
iter++;
continue;
}
table->field[0]->store(oid);
table->field[1]->store(iter->segmentNum);
table->field[2]->store(iter->partitionNum);
WriteEngine::Convertor::oid2FileName(oid, oidDirName, dbDir, iter->partitionNum, iter->segmentNum);
std::stringstream DbRootName;
DbRootName << "DBRoot" << iter->dbRoot;
std::string DbRootPath = config->getConfig("SystemConfig", DbRootName.str());
fileSize = compressedFileSize = 0;
snprintf(fullFileName, WriteEngine::FILE_NAME_SIZE, "%s/%s", DbRootPath.c_str(), oidDirName);
std::ostringstream oss;
oss << "pm" << pmId << "_WriteEngineServer";
std::string client = oss.str();
msgQueueClient = messageqcpp::MessageQueueClientPool::getInstance(oss.str());
if (!get_file_sizes(msgQueueClient, fullFileName, &fileSize, &compressedFileSize))
{
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
delete emp;
if (result)
return 1;
}
table->field[3]->store(fullFileName, strlen(fullFileName), cs);
if (fileSize > 0)
{
table->field[4]->set_notnull();
table->field[4]->store(fileSize);
if (compressedFileSize > 0)
{
table->field[5]->set_notnull();
table->field[5]->store(compressedFileSize);
}
else
{
table->field[5]->set_null();
}
}
else
{
table->field[4]->set_null();
table->field[5]->set_null();
}
if (schema_table_store_record(thd, table))
{
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
delete emp;
return 1;
}
iter++;
messageqcpp::MessageQueueClientPool::releaseInstance(msgQueueClient);
msgQueueClient = NULL;
}
}
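
The same pattern is applied to INFORMATION_SCHEMA.COLUMNSTORE_FILES above: generate_result() handles a single OID, the object_id predicate forms shown for the extents table are recognized, and the full OID scan from 3000 to MaxOID only runs when no condition matched (cond_oid stays 0). A hedged example with a hypothetical OID:

SELECT * FROM information_schema.columnstore_files WHERE object_id = 3001;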

View File

@@ -42,22 +42,95 @@ ST_FIELD_INFO is_columnstore_tables_fields[] =
{0, 0, MYSQL_TYPE_NULL, 0, 0, 0, 0}
};
static void get_cond_item(Item_func* item, String** table, String** db)
{
char tmp_char[MAX_FIELD_WIDTH];
Item_field* item_field = (Item_field*) item->arguments()[0]->real_item();
if (strcasecmp(item_field->field_name, "table_name") == 0)
{
String str_buf(tmp_char, sizeof(tmp_char), system_charset_info);
*table = item->arguments()[1]->val_str(&str_buf);
return;
}
else if (strcasecmp(item_field->field_name, "table_schema") == 0)
{
String str_buf(tmp_char, sizeof(tmp_char), system_charset_info);
*db = item->arguments()[1]->val_str(&str_buf);
return;
}
}
static void get_cond_items(COND* cond, String** table, String** db)
{
if (cond->type() == Item::FUNC_ITEM)
{
Item_func* fitem = (Item_func*) cond;
if (fitem->arguments()[0]->real_item()->type() == Item::FIELD_ITEM &&
fitem->arguments()[1]->const_item())
{
get_cond_item(fitem, table, db);
}
}
else if ((cond->type() == Item::COND_ITEM) && (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC))
{
List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
Item* item;
while ((item = li++))
{
if (item->type() == Item::FUNC_ITEM)
{
get_cond_item((Item_func*)item, table, db);
}
else
{
get_cond_items(item, table, db);
}
}
}
}
static int is_columnstore_tables_fill(THD* thd, TABLE_LIST* tables, COND* cond)
{
CHARSET_INFO* cs = system_charset_info;
TABLE* table = tables->table;
String* table_name = NULL;
String* db_name = NULL;
boost::shared_ptr<execplan::CalpontSystemCatalog> systemCatalogPtr =
execplan::CalpontSystemCatalog::makeCalpontSystemCatalog(execplan::CalpontSystemCatalog::idb_tid2sid(thd->thread_id));
systemCatalogPtr->identity(execplan::CalpontSystemCatalog::FE);
if (cond)
{
get_cond_items(cond, &table_name, &db_name);
}
const std::vector< std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> > catalog_tables
= systemCatalogPtr->getTables();
for (std::vector<std::pair<execplan::CalpontSystemCatalog::OID, execplan::CalpontSystemCatalog::TableName> >::const_iterator it = catalog_tables.begin();
it != catalog_tables.end(); ++it)
{
if (db_name)
{
if ((*it).second.schema.compare(db_name->ptr()) != 0)
{
continue;
}
}
if (table_name)
{
if ((*it).second.table.compare(table_name->ptr()) != 0)
{
continue;
}
}
execplan::CalpontSystemCatalog::TableInfo tb_info = systemCatalogPtr->tableInfo((*it).second);
std::string create_date = dataconvert::DataConvert::dateToString((*it).second.create_date);
table->field[0]->store((*it).second.schema.c_str(), (*it).second.schema.length(), cs);