mirror of https://github.com/prometheus/mysqld_exporter.git
Add special handling of "buffer_page_io" subsystem
For information_schema.innodb_metrics we can split out labels based on the type of buffer page IO operation in the "buffer_page_io" subsystem.

* Simplify ScrapeInnodbMetrics() by scanning value as a float64.
* Test for broken metric output from MySQL.
* Suppress log messages in the test.
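As an illustration of the label split (not part of the commit), here is a minimal standalone Go sketch that uses the same regular expression as the new bufferPageRE; the sample names are taken from the test rows added below, and the invalid "NOPE" row shows the case that is logged and skipped.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as bufferPageRE in the collector: group 1 is the IO
// operation (read or written), group 2 becomes the "type" label.
var bufferPageRE = regexp.MustCompile(`^buffer_page_(read|written)_(.*)$`)

func main() {
	for _, name := range []string{
		"buffer_page_read_system_page",
		"buffer_page_written_undo_log",
		"NOPE", // invalid buffer_page_io name, as exercised by the test
	} {
		match := bufferPageRE.FindStringSubmatch(name)
		if len(match) != 3 {
			fmt.Printf("%s: invalid buffer_page_io metric name, skipped\n", name)
			continue
		}
		fmt.Printf("%s: operation=%s type=%s\n", name, match[1], match[2])
	}
}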
collector/info_schema_innodb_metrics.go

@@ -4,8 +4,10 @@ package collector
 
 import (
 	"database/sql"
+	"regexp"
 
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/log"
 )
 
 const infoSchemaInnodbMetricsQuery = `
@@ -16,6 +18,23 @@ const infoSchemaInnodbMetricsQuery = `
 	WHERE status = 'enabled'
 	`
 
+// Metrics descriptors.
+var (
+	infoSchemaBufferPageReadTotalDesc = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_read_total"),
+		"Total number of buffer pages read total.",
+		[]string{"type"}, nil,
+	)
+	infoSchemaBufferPageWrittenTotalDesc = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, informationSchema, "innodb_metrics_buffer_page_written_total"),
+		"Total number of buffer pages written total.",
+		[]string{"type"}, nil,
+	)
+)
+
+// Regexp for matching metric aggregations.
+var bufferPageRE = regexp.MustCompile(`^buffer_page_(read|written)_(.*)$`)
+
 // ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`.
 func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
 	innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery)
@@ -26,7 +45,7 @@ func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
 
 	var (
 		name, subsystem, metricType, comment string
-		value                                int64
+		value                                float64
 	)
 
 	for innodbMetricsRows.Next() {
@@ -35,6 +54,25 @@ func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
 		); err != nil {
 			return err
 		}
+		// Special handling of the "buffer_page_io" subsystem.
+		if subsystem == "buffer_page_io" {
+			match := bufferPageRE.FindStringSubmatch(name)
+			if len(match) != 3 {
+				log.Warnln("innodb_metrics subsystem buffer_page_io returned an invalid name:", name)
+				continue
+			}
+			switch match[1] {
+			case "read":
+				ch <- prometheus.MustNewConstMetric(
+					infoSchemaBufferPageReadTotalDesc, prometheus.CounterValue, value, match[2],
+				)
+			case "written":
+				ch <- prometheus.MustNewConstMetric(
+					infoSchemaBufferPageWrittenTotalDesc, prometheus.CounterValue, value, match[2],
+				)
+			}
+			continue
+		}
 		metricName := "innodb_metrics_" + subsystem + "_" + name
 		// MySQL returns counters named two different ways. "counter" and "status_counter"
 		// value >= 0 is necessary due to upstream bugs: http://bugs.mysql.com/bug.php?id=75966
@@ -46,7 +84,7 @@ func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
 			ch <- prometheus.MustNewConstMetric(
 				description,
 				prometheus.CounterValue,
-				float64(value),
+				value,
 			)
 		} else {
 			description := prometheus.NewDesc(
@@ -56,7 +94,7 @@ func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
 			ch <- prometheus.MustNewConstMetric(
 				description,
 				prometheus.GaugeValue,
-				value,
+				value,
 			)
 		}
 	}
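With these changes, the two buffer_page_io rows in the new test below should end up exported roughly as follows (assuming the exporter's usual "mysql" namespace and "info_schema" subsystem constants passed to BuildFQName):

mysql_info_schema_innodb_metrics_buffer_page_read_total{type="system_page"} 3
mysql_info_schema_innodb_metrics_buffer_page_written_total{type="undo_log"} 4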
collector/info_schema_innodb_metrics_test.go (new file, 62 lines)

@@ -0,0 +1,62 @@
+package collector
+
+import (
+	"flag"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbMetrics(t *testing.T) {
+	// Suppress log messages in the test.
+	err := flag.Set("log.level", "fatal")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("error opening a stub database connection: %s", err)
+	}
+	defer db.Close()
+
+	columns := []string{"name", "subsystem", "type", "comment", "count"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("lock_timeouts", "lock", "counter", "Number of lock timeouts", 0).
+		AddRow("buffer_pool_reads", "buffer", "status_counter", "Number of reads directly from disk (innodb_buffer_pool_reads)", 1).
+		AddRow("buffer_pool_size", "server", "value", "Server buffer pool size (all buffer pools) in bytes", 2).
+		AddRow("buffer_page_read_system_page", "buffer_page_io", "counter", "Number of System Pages read", 3).
+		AddRow("buffer_page_written_undo_log", "buffer_page_io", "counter", "Number of Undo Log Pages written", 4).
+		AddRow("NOPE", "buffer_page_io", "counter", "An invalid buffer_page_io metric", 5)
+	mock.ExpectQuery(sanitizeQuery(infoSchemaInnodbMetricsQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		if err = ScrapeInnodbMetrics(db, ch); err != nil {
+			t.Errorf("error calling function on test: %s", err)
+		}
+		close(ch)
+	}()
+
+	metricExpected := []MetricResult{
+		{labels: labelMap{}, value: 0, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{}, value: 1, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{}, value: 2, metricType: dto.MetricType_GAUGE},
+		{labels: labelMap{"type": "system_page"}, value: 3, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"type": "undo_log"}, value: 4, metricType: dto.MetricType_COUNTER},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range metricExpected {
+			got := readMetric(<-ch)
+			convey.So(got, convey.ShouldResemble, expect)
+		}
+	})
+
+	// Ensure all SQL queries were executed
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
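Assuming the usual Go tooling and that goconvey and go-sqlmock are available in the build environment, the new test can be run on its own with something like:

	go test ./collector -run TestScrapeInnodbMetrics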