mirror of https://github.com/prometheus/mysqld_exporter.git synced 2025-08-08 01:22:53 +03:00

Fix file_instances metric collector (#205)

* Add event_name label to perf_schema file_instances

Include the event name column as a label for metrics gathered from
`performance_schema.file_summary_by_instance`.

* Change file_instances path cleaner default

Default to not cleaning the path on `performance_schema.file_summary_by_instance`
names to avoid name collisions (a hypothetical collision is sketched below).
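
For illustration, here is a minimal Go sketch (not code from this commit) of the kind of collision the new default avoids; the file paths are hypothetical, and filepath.Base stands in for any path cleaning that collapses distinct instrument paths to a shorter form:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Two hypothetical rows from performance_schema.file_summary_by_instance.
	fileNames := []string{
		"/var/lib/mysql/db1/t.ibd",
		"/var/lib/mysql/db2/t.ibd",
	}

	seen := map[string]bool{}
	for _, name := range fileNames {
		// A cleaner that keeps only the base name maps both rows to "t.ibd",
		// so the resulting series would carry identical label values.
		cleaned := filepath.Base(name)
		if seen[cleaned] {
			fmt.Printf("label collision on cleaned file_name %q\n", cleaned)
		}
		seen[cleaned] = true
	}
}

Keeping the full path by default, together with the new event_name label, keeps each (file, event) series distinct.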
Authored by Ben Kochie on 2017-07-05 19:54:37 +02:00, committed by GitHub
parent bfc59d86fa, commit e755b01ea4
2 changed files with 21 additions and 21 deletions


@@ -14,7 +14,7 @@ import (
 const perfFileInstancesQuery = `
 	SELECT
-	  FILE_NAME,
+	  FILE_NAME, EVENT_NAME,
 	  COUNT_READ, COUNT_WRITE,
 	  SUM_NUMBER_OF_BYTES_READ, SUM_NUMBER_OF_BYTES_WRITE
 	  FROM performance_schema.file_summary_by_instance
@@ -29,19 +29,19 @@ var (
 	)
 	performanceSchemaFileInstancesRemovePrefix = flag.Bool(
-		"collect.perf_schema.file_instances.remove_prefix", true,
+		"collect.perf_schema.file_instances.remove_prefix", false,
 		"Remove path prefix in performance_schema.file_summary_by_instance",
 	)
 	performanceSchemaFileInstancesBytesDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, performanceSchema, "file_instances_bytes"),
 		"The number of bytes processed by file read/write operations.",
-		[]string{"file_name", "mode"}, nil,
+		[]string{"file_name", "event_name", "mode"}, nil,
 	)
 	performanceSchemaFileInstancesCountDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, performanceSchema, "file_instances_total"),
 		"The total number of file read/write operations.",
-		[]string{"file_name", "mode"}, nil,
+		[]string{"file_name", "event_name", "mode"}, nil,
 	)
 )
@@ -55,14 +55,14 @@ func ScrapePerfFileInstances(db *sql.DB, ch chan<- prometheus.Metric) error {
 	defer perfSchemaFileInstancesRows.Close()
 	var (
-		fileName                      string
+		fileName, eventName           string
 		countRead, countWrite         uint64
 		sumBytesRead, sumBytesWritten uint64
 	)
 	for perfSchemaFileInstancesRows.Next() {
 		if err := perfSchemaFileInstancesRows.Scan(
-			&fileName,
+			&fileName, &eventName,
 			&countRead, &countWrite,
 			&sumBytesRead, &sumBytesWritten,
 		); err != nil {
@@ -74,19 +74,19 @@ func ScrapePerfFileInstances(db *sql.DB, ch chan<- prometheus.Metric) error {
 		}
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countRead),
-			fileName, "read",
+			fileName, eventName, "read",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countWrite),
-			fileName, "write",
+			fileName, eventName, "write",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesRead),
-			fileName, "read",
+			fileName, eventName, "read",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesBytesDesc, prometheus.CounterValue, float64(sumBytesWritten),
-			fileName, "write",
+			fileName, eventName, "write",
 		)
 	}
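
For reference, a series scraped after this change carries the extra label. Assuming the exporter's usual mysql_perf_schema_ metric prefix, the output would look roughly like the following (file path, event name, and values are illustrative):

mysql_perf_schema_file_instances_total{event_name="wait/io/file/innodb/innodb_data_file",file_name="/var/lib/mysql/ibdata1",mode="read"} 42
mysql_perf_schema_file_instances_bytes{event_name="wait/io/file/innodb/innodb_data_file",file_name="/var/lib/mysql/ibdata1",mode="read"} 688128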


@@ -23,11 +23,11 @@ func TestScrapePerfFileInstances(t *testing.T) {
 	}
 	defer db.Close()
-	columns := []string{"FILE_NAME", "COUNT_READ", "COUNT_WRITE", "SUM_NUMBER_OF_BYTES_READ", "SUM_NUMBER_OF_BYTES_WRITE"}
+	columns := []string{"FILE_NAME", "EVENT_NAME", "COUNT_READ", "COUNT_WRITE", "SUM_NUMBER_OF_BYTES_READ", "SUM_NUMBER_OF_BYTES_WRITE"}
 	rows := sqlmock.NewRows(columns).
-		AddRow("file_1", "3", "4", "725", "128").
-		AddRow("file_2", "23", "12", "3123", "967")
+		AddRow("file_1", "event1", "3", "4", "725", "128").
+		AddRow("file_2", "event2", "23", "12", "3123", "967")
 	mock.ExpectQuery(sanitizeQuery(perfFileInstancesQuery)).WillReturnRows(rows)
 	ch := make(chan prometheus.Metric)
@@ -39,14 +39,14 @@ func TestScrapePerfFileInstances(t *testing.T) {
 	}()
 	metricExpected := []MetricResult{
-		{labels: labelMap{"file_name": "file_1", "mode": "read"}, value: 3, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_1", "mode": "write"}, value: 4, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_1", "mode": "read"}, value: 725, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_1", "mode": "write"}, value: 128, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_2", "mode": "read"}, value: 23, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_2", "mode": "write"}, value: 12, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_2", "mode": "read"}, value: 3123, metricType: dto.MetricType_COUNTER},
-		{labels: labelMap{"file_name": "file_2", "mode": "write"}, value: 967, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_1", "event_name": "event1", "mode": "read"}, value: 3, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_1", "event_name": "event1", "mode": "write"}, value: 4, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_1", "event_name": "event1", "mode": "read"}, value: 725, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_1", "event_name": "event1", "mode": "write"}, value: 128, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_2", "event_name": "event2", "mode": "read"}, value: 23, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_2", "event_name": "event2", "mode": "write"}, value: 12, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_2", "event_name": "event2", "mode": "read"}, value: 3123, metricType: dto.MetricType_COUNTER},
+		{labels: labelMap{"file_name": "file_2", "event_name": "event2", "mode": "write"}, value: 967, metricType: dto.MetricType_COUNTER},
 	}
 	convey.Convey("Metrics comparison", t, func() {
 		for _, expect := range metricExpected {
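
To run just this test locally, assuming these files sit in the repository's collector package as in the upstream layout:

go test -run TestScrapePerfFileInstances ./collector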