To get correct values in SHOW TABLE STATUS
Added pseudo column fragment memory; changed ndb_get_table_statistics to take a struct and read row_size and fragment_memory.

ndb/include/kernel/AttributeHeader.hpp:
    Added pseudo column fragment memory
ndb/include/ndbapi/NdbDictionary.hpp:
    Added pseudo column fragment memory
ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp:
    Added pseudo column fragment memory
ndb/src/ndbapi/NdbDictionary.cpp:
    Added pseudo column fragment memory
ndb/src/ndbapi/NdbDictionaryImpl.cpp:
    Added pseudo column fragment memory
sql/ha_ndbcluster.cc:
    Changed ndb_get_table_statistics to take a struct and read row_size and fragment_memory
sql/ha_ndbcluster.h:
    Partially implemented update_table_comment
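For orientation, a minimal self-contained C++ sketch (not the server code) of the aggregation the patched ndb_get_table_statistics performs: row count, commit count and fragment memory are summed across fragments, while row_size keeps the largest value seen. FragmentSample and aggregate() are illustrative names only, and uint64_t stands in for the server's Uint64 so the sketch compiles on its own.

#include <cstdint>
#include <vector>

// Mirrors the struct added to sql/ha_ndbcluster.cc below.
struct Ndb_statistics {
  uint64_t row_count;
  uint64_t commit_count;
  uint64_t row_size;
  uint64_t fragment_memory;
};

// One scan row, i.e. the pseudo-column values reported by one fragment.
struct FragmentSample {
  uint64_t rows;      // NDB$ROW_COUNT
  uint64_t commits;   // NDB$COMMIT_COUNT
  uint64_t row_size;  // NDB$ROW_SIZE
  uint64_t memory;    // NDB$FRAGMENT_MEMORY (bytes)
};

static Ndb_statistics aggregate(const std::vector<FragmentSample>& frags)
{
  Ndb_statistics stat = {0, 0, 0, 0};
  for (const FragmentSample& f : frags) {
    stat.row_count       += f.rows;       // summed over fragments
    stat.commit_count    += f.commits;    // summed over fragments
    if (stat.row_size < f.row_size)       // maximum over fragments
      stat.row_size = f.row_size;
    stat.fragment_memory += f.memory;     // summed over fragments
  }
  return stat;
}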
ndb/include/kernel/AttributeHeader.hpp
@@ -40,6 +40,7 @@ public:
   STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges)
 
   STATIC_CONST( ROW_SIZE = 0xFFFA );
+  STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
 
   /** Initialize AttributeHeader at location aHeaderPtr */
   static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
ndb/include/ndbapi/NdbDictionary.hpp
@@ -438,6 +438,7 @@ public:
     const char* getDefaultValue() const;
 
     static const Column * FRAGMENT;
+    static const Column * FRAGMENT_MEMORY;
     static const Column * ROW_COUNT;
     static const Column * COMMIT_COUNT;
     static const Column * ROW_SIZE;
ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -1001,6 +1001,13 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
   case AttributeHeader::FRAGMENT:
     * outBuffer = operPtr.p->fragId >> 1; // remove "hash" bit
     return 1;
+  case AttributeHeader::FRAGMENT_MEMORY:
+    {
+      Uint64 tmp= fragptr.p->noOfPages;
+      tmp*= 32768;
+      memcpy(outBuffer,&tmp,8);
+    }
+    return 2;
   case AttributeHeader::ROW_SIZE:
     * outBuffer = tabptr.p->tupheadsize << 2;
     return 1;
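The FRAGMENT_MEMORY branch above reports a fragment's memory use by converting its page count to bytes (32768 bytes, i.e. 32 KB, per page) and copying the 64-bit result into the output buffer; read_psuedo's return value appears to be the number of 32-bit words written, hence return 2 here versus return 1 for the 32-bit cases. A tiny standalone sketch of that conversion (read_fragment_memory is an illustrative name, not a kernel-block function):

#include <cstdint>
#include <cstring>

// Convert a fragment's page count to bytes and write the value as two
// 32-bit words, the way the FRAGMENT_MEMORY case in Dbtup::read_psuedo does.
static int read_fragment_memory(uint32_t noOfPages, uint32_t* outBuffer)
{
  uint64_t bytes = static_cast<uint64_t>(noOfPages) * 32768; // pages -> bytes
  std::memcpy(outBuffer, &bytes, 8);                         // 8 bytes copied
  return 2;                                                  // 32-bit words written
}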
ndb/src/ndbapi/NdbDictionary.cpp
@@ -1000,6 +1000,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
 }
 
 const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0;
 const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
 const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
 const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -232,6 +232,11 @@ NdbColumnImpl::create_psuedo(const char * name){
     col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
     col->m_impl.m_attrSize = 4;
     col->m_impl.m_arraySize = 1;
+  } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){
+    col->setType(NdbDictionary::Column::Bigunsigned);
+    col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY;
+    col->m_impl.m_attrSize = 8;
+    col->m_impl.m_arraySize = 1;
   } else if(!strcmp(name, "NDB$ROW_COUNT")){
     col->setType(NdbDictionary::Column::Bigunsigned);
     col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
@@ -685,10 +690,12 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
   m_globalHash->lock();
   if(--f_dictionary_count == 0){
     delete NdbDictionary::Column::FRAGMENT;
+    delete NdbDictionary::Column::FRAGMENT_MEMORY;
     delete NdbDictionary::Column::ROW_COUNT;
     delete NdbDictionary::Column::COMMIT_COUNT;
     delete NdbDictionary::Column::ROW_SIZE;
     NdbDictionary::Column::FRAGMENT= 0;
+    NdbDictionary::Column::FRAGMENT_MEMORY= 0;
     NdbDictionary::Column::ROW_COUNT= 0;
     NdbDictionary::Column::COMMIT_COUNT= 0;
     NdbDictionary::Column::ROW_SIZE= 0;
@@ -754,6 +761,8 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
   if(f_dictionary_count++ == 0){
     NdbDictionary::Column::FRAGMENT=
       NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
+    NdbDictionary::Column::FRAGMENT_MEMORY=
+      NdbColumnImpl::create_psuedo("NDB$FRAGMENT_MEMORY");
     NdbDictionary::Column::ROW_COUNT=
       NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
     NdbDictionary::Column::COMMIT_COUNT=
sql/ha_ndbcluster.cc
@@ -85,7 +85,7 @@ static int unpackfrm(const void **data, uint *len,
                     const void* pack_data);
 
 static int ndb_get_table_statistics(Ndb*, const char *,
-                                    Uint64* rows, Uint64* commits);
+                                    struct Ndb_statistics *);
 
 
 /*
@@ -94,6 +94,17 @@ static int ndb_get_table_statistics(Ndb*, const char *,
 */
 static uint32 dummy_buf;
 
+/*
+  Stats that can be retrieved from ndb
+*/
+
+struct Ndb_statistics {
+  Uint64 row_count;
+  Uint64 commit_count;
+  Uint64 row_size;
+  Uint64 fragment_memory;
+};
+
 /*
   Error handling functions
 */
@@ -262,9 +273,11 @@ void ha_ndbcluster::records_update()
   //  if (info->records == ~(ha_rows)0)
   {
     Ndb *ndb= get_ndb();
-    Uint64 rows;
-    if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){
-      info->records= rows;
+    struct Ndb_statistics stat;
+    if(ndb_get_table_statistics(ndb, m_tabname, &stat) == 0){
+      mean_rec_length= stat.row_size;
+      data_file_length= stat.fragment_memory;
+      info->records= stat.row_count;
     }
   }
   {
@@ -2724,10 +2737,19 @@ void ha_ndbcluster::info(uint flag)
     if ((my_errno= check_ndb_connection()))
       DBUG_VOID_RETURN;
     Ndb *ndb= get_ndb();
-    Uint64 rows= 100;
-    if (current_thd->variables.ndb_use_exact_count)
-      ndb_get_table_statistics(ndb, m_tabname, &rows, 0);
-    records= rows;
+    struct Ndb_statistics stat;
+    if (current_thd->variables.ndb_use_exact_count &&
+        ndb_get_table_statistics(ndb, m_tabname, &stat) == 0)
+    {
+      mean_rec_length= stat.row_size;
+      data_file_length= stat.fragment_memory;
+      records= stat.row_count;
+    }
+    else
+    {
+      mean_rec_length= 0;
+      records= 100;
+    }
   }
  }
 if (flag & HA_STATUS_CONST)
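This hunk is where the commit earns its title: assuming the usual handler-to-SHOW TABLE STATUS correspondence (records feeds Rows, mean_rec_length feeds Avg_row_length, data_file_length feeds Data_length), the statistics gathered from NDB now surface directly in SHOW TABLE STATUS. A hypothetical helper making that mapping explicit (HandlerInfo and to_handler_info are illustrative, not server types):

#include <cstdint>

struct Ndb_statistics { uint64_t row_count, commit_count, row_size, fragment_memory; };

// Handler-level fields as filled by ha_ndbcluster::info() above, each annotated
// with the SHOW TABLE STATUS column it is assumed to feed.
struct HandlerInfo { uint64_t records, mean_rec_length, data_file_length; };

static HandlerInfo to_handler_info(const Ndb_statistics& stat)
{
  HandlerInfo info;
  info.records          = stat.row_count;        // Rows
  info.mean_rec_length  = stat.row_size;         // Avg_row_length
  info.data_file_length = stat.fragment_memory;  // Data_length
  return info;
}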
@@ -4813,8 +4835,8 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len,
 
 static
 int
-ndb_get_table_statistics(Ndb* ndb, const char * table,
-                         Uint64* row_count, Uint64* commit_count)
+ndb_get_table_statistics(Ndb* ndb, const char * table,
+                         struct Ndb_statistics * ndbstat)
 {
   DBUG_ENTER("ndb_get_table_statistics");
   DBUG_PRINT("enter", ("table: %s", table));
@@ -4835,9 +4857,11 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     if (check == -1)
       break;
 
-    Uint64 rows, commits;
+    Uint64 rows, commits, size, mem;
     pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
     pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
+    pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
+    pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
 
     check= pTrans->execute(NdbTransaction::NoCommit,
                            NdbTransaction::AbortOnError,
@@ -4847,10 +4871,15 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
 
     Uint64 sum_rows= 0;
     Uint64 sum_commits= 0;
+    Uint64 sum_row_size= 0;
+    Uint64 sum_mem= 0;
     while((check= pOp->nextResult(TRUE, TRUE)) == 0)
     {
       sum_rows+= rows;
       sum_commits+= commits;
+      if (sum_row_size < size)
+        sum_row_size= size;
+      sum_mem+= mem;
     }
 
     if (check == -1)
@@ -4859,11 +4888,14 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     pOp->close(TRUE);
 
     ndb->closeTransaction(pTrans);
-    if(row_count)
-      * row_count= sum_rows;
-    if(commit_count)
-      * commit_count= sum_commits;
-    DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits));
+
+    ndbstat->row_count= sum_rows;
+    ndbstat->commit_count= sum_commits;
+    ndbstat->row_size= sum_row_size;
+    ndbstat->fragment_memory= sum_mem;
+
+    DBUG_PRINT("exit", ("records: %u commits: %u row_size: %d mem: %d",
+                        sum_rows, sum_commits, sum_row_size, sum_mem));
     DBUG_RETURN(0);
   } while(0);
@@ -5248,4 +5280,30 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
   DBUG_RETURN(0);
 }
 
+char*
+ha_ndbcluster::update_table_comment(
+                                /* out: table comment + additional */
+        const char*     comment)/* in: table comment defined by user */
+{
+  return (char*)comment;
+#if 0 // for the future
+  uint length= strlen(comment);
+  if(length > 64000 - 3)
+  {
+    return((char*)comment); /* string too long */
+  }
+
+  char *str;
+  const char *fmt="%s%sRow size: %d";
+  const unsigned fmt_len_plus_extra= length + strlen(fmt) + 3;
+  if ((str= my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
+  {
+    return (char*)comment;
+  }
+
+  snprintf(str,fmt_len_plus_extra,fmt,comment,10);
+  return str;
+#endif
+}
 
 #endif /* HAVE_NDBCLUSTER_DB */
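One note on the disabled block above: the format string "%s%sRow size: %d" contains three conversions, but the snprintf call passes only two arguments after the format. A self-contained sketch of what a corrected version might look like (build_table_comment is a hypothetical name; plain malloc stands in for the server's my_malloc):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Append "Row size: N" to a user-supplied table comment, returning a newly
// malloc'ed string, or the original comment on overlong input or allocation failure.
static char* build_table_comment(const char* comment, unsigned long row_size)
{
  const char* fmt = "%s%sRow size: %lu";
  size_t length = std::strlen(comment);
  if (length > 64000 - 3)
    return const_cast<char*>(comment);              /* string too long */

  size_t buf_len = length + std::strlen(fmt) + 32;  // extra room for the number
  char* str = static_cast<char*>(std::malloc(buf_len));
  if (str == NULL)
    return const_cast<char*>(comment);

  std::snprintf(str, buf_len, fmt, comment, length ? " " : "", row_size);
  return str;
}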
sql/ha_ndbcluster.h
@@ -215,6 +215,8 @@ class ha_ndbcluster: public handler
 
   int write_ndb_file();
 
+  char *update_table_comment(const char * comment);
+
  private:
   int check_ndb_connection();
 