mysqld_exporter (mirror of https://github.com/prometheus/mysqld_exporter.git)
Add InnoDB compression statistics (#275)
* Add InnoDB compression statistics, per https://dev.mysql.com/doc/refman/5.5/en/innodb-cmp-table.html and https://dev.mysql.com/doc/refman/5.5/en/innodb-cmpmem-table.html
committed by Ben Kochie
parent 1645bb4d70
commit 170f6645e8
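For reference, a rough sketch of what the new metric families look like on the exporter's /metrics endpoint once both collectors are enabled. The metric names follow from the code below; the label values and sample numbers are illustrative only, and the mysql_info_schema_ prefix assumes the exporter's usual namespace and subsystem constants:

    mysql_info_schema_innodb_cmp_compress_ops_total{page_size="8192"} 42
    mysql_info_schema_innodb_cmp_compress_ops_ok_total{page_size="8192"} 40
    mysql_info_schema_innodb_cmp_compress_time_seconds_total{page_size="8192"} 1.5
    mysql_info_schema_innodb_cmp_uncompress_ops_total{page_size="8192"} 17
    mysql_info_schema_innodb_cmp_uncompress_time_seconds_total{page_size="8192"} 0.3
    mysql_info_schema_innodb_cmpmem_pages_used_total{page_size="8192",buffer_pool="0"} 256
    mysql_info_schema_innodb_cmpmem_pages_free_total{page_size="8192",buffer_pool="0"} 12
    mysql_info_schema_innodb_cmpmem_relocation_ops_total{page_size="8192",buffer_pool="0"} 5
    mysql_info_schema_innodb_cmpmem_relocation_time_seconds_total{page_size="8192",buffer_pool="0"} 0.004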
README.md
@@ -58,6 +58,8 @@ collect.global_variables | 5.1 | Collect
 collect.info_schema.clientstats | 5.5 | If running with userstat=1, set to true to collect client statistics.
 collect.info_schema.innodb_metrics | 5.6 | Collect metrics from information_schema.innodb_metrics.
 collect.info_schema.innodb_tablespaces | 5.7 | Collect metrics from information_schema.innodb_sys_tablespaces.
+collect.info_schema.innodb_cmp | 5.5 | Collect InnoDB compressed tables metrics from information_schema.innodb_cmp.
+collect.info_schema.innodb_cmpmem | 5.5 | Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem.
 collect.info_schema.processlist | 5.1 | Collect thread state counts from information_schema.processlist.
 collect.info_schema.processlist.min_time | 5.1 | Minimum time a thread must be in each state to be counted. (default: 0)
 collect.info_schema.query_response_time | 5.5 | Collect query response time distribution if query_response_time_stats is ON.
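Both new collectors default to off. An illustrative way to enable them on the command line (the binary path is a placeholder; the flag names are the ones documented above):

    ./mysqld_exporter \
      --collect.info_schema.innodb_cmp \
      --collect.info_schema.innodb_cmpmem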
collector/exporter.go
@@ -68,6 +68,8 @@ type Collect struct {
 	UserStat             bool
 	ClientStat           bool
 	TableStat            bool
+	InnodbCmp            bool
+	InnodbCmpMem         bool
 	QueryResponseTime    bool
 	EngineTokudbStatus   bool
 	EngineInnodbStatus   bool
@@ -362,6 +364,24 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
 		}
 		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.userstats")
 	}
+	if e.collect.InnodbCmp {
+		scrapeTime = time.Now()
+		if err = ScrapeInnodbCmp(db, ch); err != nil {
+			log.Errorln("Error scraping for collect.info_schema.innodb_cmp:", err)
+			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_cmp").Inc()
+			e.error.Set(1)
+		}
+		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_cmp")
+	}
+	if e.collect.InnodbCmpMem {
+		scrapeTime = time.Now()
+		if err = ScrapeInnodbCmpMem(db, ch); err != nil {
+			log.Errorln("Error scraping for collect.info_schema.innodb_cmpmem:", err)
+			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_cmpmem").Inc()
+			e.error.Set(1)
+		}
+		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_cmpmem")
+	}
 	if e.collect.ClientStat {
 		scrapeTime = time.Now()
 		if err = ScrapeClientStat(db, ch); err != nil {
collector/info_schema_innodb_cmp.go (new file, 76 lines)
@@ -0,0 +1,76 @@
+// Scrape `information_schema.INNODB_CMP`.
+
+package collector
+
+import (
+	"database/sql"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const innodbCmpQuery = `
+	SELECT
+	  page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time
+	FROM information_schema.innodb_cmp
+	`
+
+var (
+	infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been compressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpCompressTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"),
+		"Total time in seconds spent in attempts to compress B-tree pages.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"),
+		"Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.",
+		[]string{"page_size"}, nil,
+	)
+	infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"),
+		"Total time in seconds spent in uncompressing B-tree pages.",
+		[]string{"page_size"}, nil,
+	)
+)
+
+// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
+func ScrapeInnodbCmp(db *sql.DB, ch chan<- prometheus.Metric) error {
+	informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
+	if err != nil {
+		return err
+	}
+	defer informationSchemaInnodbCmpRows.Close()
+
+	var (
+		page_size                                                                      string
+		compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time float64
+	)
+
+	for informationSchemaInnodbCmpRows.Next() {
+		if err := informationSchemaInnodbCmpRows.Scan(
+			&page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time,
+		); err != nil {
+			return err
+		}
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compress_ops, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compress_ops_ok, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size)
+	}
+	return nil
+}
collector/info_schema_innodb_cmp_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+package collector
+
+import (
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmp(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("error opening a stub database connection: %s", err)
+	}
+	defer db.Close()
+
+	columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("1024", 10, 20, 30, 40, 50)
+	mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		if err = ScrapeInnodbCmp(db, ch); err != nil {
+			t.Errorf("error calling function on test: %s", err)
+		}
+		close(ch)
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			got := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, got)
+		}
+	})
+
+	// Ensure all SQL queries were executed
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
collector/info_schema_innodb_cmpmem.go (new file, 68 lines)
@@ -0,0 +1,68 @@
+// Scrape `information_schema.INNODB_CMPMEM`.
+
+package collector
+
+import (
+	"database/sql"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const innodbCmpMemQuery = `
+	SELECT
+	  page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time
+	FROM information_schema.innodb_cmpmem
+	`
+
+var (
+	infoSchemaInnodbCmpMemPagesUsed = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
+		"Number of blocks of the size PAGE_SIZE that are currently in use.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemPagesFree = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"),
+		"Number of blocks of the size PAGE_SIZE that are currently available for allocation.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemRelocationOps = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"),
+		"Number of times a block of the size PAGE_SIZE has been relocated.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+	infoSchemaInnodbCmpMemRelocationTime = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"),
+		"Total time in seconds spent in relocating blocks.",
+		[]string{"page_size", "buffer_pool"}, nil,
+	)
+)
+
+// ScrapeInnodbCmpMem collects from `information_schema.innodb_cmpmem`.
+func ScrapeInnodbCmpMem(db *sql.DB, ch chan<- prometheus.Metric) error {
+	informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery)
+	if err != nil {
+		return err
+	}
+	defer informationSchemaInnodbCmpMemRows.Close()
+
+	var (
+		page_size, buffer_pool                                   string
+		pages_used, pages_free, relocation_ops, relocation_time float64
+	)
+
+	for informationSchemaInnodbCmpMemRows.Next() {
+		if err := informationSchemaInnodbCmpMemRows.Scan(
+			&page_size, &buffer_pool, &pages_used, &pages_free, &relocation_ops, &relocation_time,
+		); err != nil {
+			return err
+		}
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesUsed, prometheus.CounterValue, pages_used, page_size, buffer_pool)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool)
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool)
+		// relocation_time is scaled by 1/1000 so the metric is exported in
+		// seconds (this assumes the table reports the value in milliseconds).
+		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, relocation_time/1000, page_size, buffer_pool)
+	}
+	return nil
+}
collector/info_schema_innodb_cmpmem_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
+package collector
+
+import (
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmpMem(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("error opening a stub database connection: %s", err)
+	}
+	defer db.Close()
+
+	columns := []string{"page_size", "buffer_pool", "pages_used", "pages_free", "relocation_ops", "relocation_time"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("1024", "0", 30, 40, 50, 6000)
+	mock.ExpectQuery(sanitizeQuery(innodbCmpMemQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		if err = ScrapeInnodbCmpMem(db, ch); err != nil {
+			t.Errorf("error calling function on test: %s", err)
+		}
+		close(ch)
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 30, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 40, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 50, metricType: dto.MetricType_COUNTER},
+		// relocation_time of 6000 is divided by 1000 by the collector.
+		{labels: labelMap{"page_size": "1024", "buffer_pool": "0"}, value: 6, metricType: dto.MetricType_COUNTER},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			got := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, got)
+		}
+	})
+
+	// Ensure all SQL queries were executed
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
mysqld_exporter.go
@@ -113,6 +113,14 @@ var (
 		"collect.info_schema.query_response_time",
 		"Collect query response time distribution if query_response_time_stats is ON.",
 	).Default("false").Bool()
+	collectInnodbCmp = kingpin.Flag(
+		"collect.info_schema.innodb_cmp",
+		"Collect InnoDB compressed tables metrics from information_schema.innodb_cmp.",
+	).Default("false").Bool()
+	collectInnodbCmpMem = kingpin.Flag(
+		"collect.info_schema.innodb_cmpmem",
+		"Collect InnoDB buffer pool compression metrics from information_schema.innodb_cmpmem.",
+	).Default("false").Bool()
 	collectEngineTokudbStatus = kingpin.Flag(
 		"collect.engine_tokudb_status",
 		"Collect from SHOW ENGINE TOKUDB STATUS",
@@ -202,6 +210,8 @@ func handler(w http.ResponseWriter, r *http.Request) {
 		PerfRepGroupMemberStats: filter(filters, "perf_schema.replication_group_member_stats", *collectPerfRepGroupMemberStats),
 		UserStat:                filter(filters, "info_schema.userstats", *collectUserStat),
 		ClientStat:              filter(filters, "info_schema.clientstats", *collectClientStat),
+		InnodbCmp:               filter(filters, "info_schema.innodb_cmp", *collectInnodbCmp),
+		InnodbCmpMem:            filter(filters, "info_schema.innodb_cmpmem", *collectInnodbCmpMem),
 		TableStat:               filter(filters, "info_schema.tablestats", *collectTableStat),
 		QueryResponseTime:       filter(filters, "info_schema.query_response_time", *collectQueryResponseTime),
 		EngineTokudbStatus:      filter(filters, "engine_tokudb_status", *collectEngineTokudbStatus),
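Because the new collectors are wired through filter(), they can also be requested for a single scrape. A hedged example, assuming the exporter's collect[] URL-parameter filtering (which the filter() calls above implement) and the default listen port 9104:

    curl 'http://localhost:9104/metrics?collect[]=info_schema.innodb_cmp&collect[]=info_schema.innodb_cmpmem'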