1
0
mirror of https://github.com/prometheus/mysqld_exporter.git synced 2025-07-31 17:44:21 +03:00

Add minimal MySQL version to Scraper interface (#328)

* Add Version method to Scraper interface.
* Skip scrapers for unsupported MySQL versions.

Signed-off-by: Alexey Palazhchenko <alexey.palazhchenko@percona.com>
This commit is contained in:
Alexey Palazhchenko
2018-10-29 18:35:38 +03:00
committed by Ben Kochie
parent ff14a3d4cb
commit 1465a0b0e0
30 changed files with 267 additions and 8 deletions

View File

@ -64,6 +64,11 @@ func (ScrapeBinlogSize) Help() string {
return "Collect the current size of all registered binlog files" return "Collect the current size of all registered binlog files"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeBinlogSize) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeBinlogSize) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeBinlogSize) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var logBin uint8 var logBin uint8
@ -113,3 +118,6 @@ func (ScrapeBinlogSize) Scrape(ctx context.Context, db *sql.DB, ch chan<- promet
return nil return nil
} }
// check interface
var _ Scraper = ScrapeBinlogSize{}

View File

@ -45,6 +45,11 @@ func (ScrapeEngineInnodbStatus) Help() string {
return "Collect from SHOW ENGINE INNODB STATUS" return "Collect from SHOW ENGINE INNODB STATUS"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeEngineInnodbStatus) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineInnodbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeEngineInnodbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
rows, err := db.QueryContext(ctx, engineInnodbStatusQuery) rows, err := db.QueryContext(ctx, engineInnodbStatusQuery)
@ -92,3 +97,6 @@ func (ScrapeEngineInnodbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<
return nil return nil
} }
// check interface
var _ Scraper = ScrapeEngineInnodbStatus{}

View File

@ -43,6 +43,11 @@ func (ScrapeEngineTokudbStatus) Help() string {
return "Collect from SHOW ENGINE TOKUDB STATUS" return "Collect from SHOW ENGINE TOKUDB STATUS"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeEngineTokudbStatus) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineTokudbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeEngineTokudbStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
tokudbRows, err := db.QueryContext(ctx, engineTokudbStatusQuery) tokudbRows, err := db.QueryContext(ctx, engineTokudbStatusQuery)
@ -87,3 +92,6 @@ func sanitizeTokudbMetric(metricName string) string {
} }
return metricName return metricName
} }
// check interface
var _ Scraper = ScrapeEngineTokudbStatus{}

View File

@ -17,6 +17,8 @@ import (
"context" "context"
"database/sql" "database/sql"
"fmt" "fmt"
"regexp"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -33,14 +35,20 @@ const (
exporter = "exporter" exporter = "exporter"
) )
// SQL Queries. // SQL queries and parameters.
const ( const (
versionQuery = `SELECT @@version`
// System variable params formatting. // System variable params formatting.
// See: https://github.com/go-sql-driver/mysql#system-variables // See: https://github.com/go-sql-driver/mysql#system-variables
sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27` sessionSettingsParam = `log_slow_filter=%27tmp_table_on_disk,filesort_on_disk%27`
timeoutParam = `lock_wait_timeout=%d` timeoutParam = `lock_wait_timeout=%d`
) )
var (
	// versionRE extracts the leading major.minor portion of the string
	// returned by `SELECT @@version` (e.g. "5.7" out of "5.7.23-log").
	versionRE = regexp.MustCompile(`^\d+\.\d+`)
)
// Tunable flags. // Tunable flags.
var ( var (
exporterLockTimeout = kingpin.Flag( exporterLockTimeout = kingpin.Flag(
@ -145,9 +153,14 @@ func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection") ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")
wg := &sync.WaitGroup{} version := getMySQLVersion(db)
var wg sync.WaitGroup
defer wg.Wait() defer wg.Wait()
for _, scraper := range e.scrapers { for _, scraper := range e.scrapers {
if version < scraper.Version() {
continue
}
wg.Add(1) wg.Add(1)
go func(scraper Scraper) { go func(scraper Scraper) {
defer wg.Done() defer wg.Done()
@ -163,6 +176,19 @@ func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) {
} }
} }
// getMySQLVersion asks the server for @@version and returns its numeric
// major.minor prefix (e.g. 5.7). When the query fails or the version string
// cannot be parsed, it returns the sentinel 999 so that every scraper is
// treated as supported.
func getMySQLVersion(db *sql.DB) float64 {
	var raw string
	if err := db.QueryRow(versionQuery).Scan(&raw); err != nil {
		// Can't query the version: match all scrapers.
		return 999
	}
	parsed, err := strconv.ParseFloat(versionRE.FindString(raw), 64)
	if err != nil || parsed == 0 {
		// Can't parse the version: match all scrapers.
		return 999
	}
	return parsed
}
// Metrics represents exporter metrics which values can be carried between http requests. // Metrics represents exporter metrics which values can be carried between http requests.
type Metrics struct { type Metrics struct {
TotalScrapes prometheus.Counter TotalScrapes prometheus.Counter

View File

@ -15,6 +15,7 @@ package collector
import ( import (
"context" "context"
"database/sql"
"testing" "testing"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -63,3 +64,17 @@ func TestExporter(t *testing.T) {
} }
}) })
} }
// TestGetMySQLVersion checks, against a live server reachable via dsn, that
// getMySQLVersion reports a sane numeric version. Skipped with -short since
// it needs a real database connection.
// NOTE(review): the 5.5–10.3 bounds presumably cover the MySQL/MariaDB
// versions exercised in CI — confirm when newer versions are added there.
func TestGetMySQLVersion(t *testing.T) {
	if testing.Short() {
		t.Skip("-short is passed, skipping test")
	}
	convey.Convey("Version parsing", t, func() {
		db, err := sql.Open("mysql", dsn)
		convey.So(err, convey.ShouldBeNil)
		defer db.Close()
		convey.So(getMySQLVersion(db), convey.ShouldBeBetweenOrEqual, 5.5, 10.3)
	})
}

View File

@ -87,6 +87,11 @@ func (ScrapeGlobalStatus) Help() string {
return "Collect from SHOW GLOBAL STATUS" return "Collect from SHOW GLOBAL STATUS"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeGlobalStatus) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeGlobalStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
globalStatusRows, err := db.QueryContext(ctx, globalStatusQuery) globalStatusRows, err := db.QueryContext(ctx, globalStatusQuery)
@ -207,3 +212,6 @@ func (ScrapeGlobalStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prom
return nil return nil
} }
// check interface
var _ Scraper = ScrapeGlobalStatus{}

View File

@ -131,6 +131,11 @@ func (ScrapeGlobalVariables) Help() string {
return "Collect from SHOW GLOBAL VARIABLES" return "Collect from SHOW GLOBAL VARIABLES"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeGlobalVariables) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeGlobalVariables) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeGlobalVariables) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
globalVariablesRows, err := db.QueryContext(ctx, globalVariablesQuery) globalVariablesRows, err := db.QueryContext(ctx, globalVariablesQuery)
@ -227,3 +232,6 @@ func validPrometheusName(s string) string {
s = strings.ToLower(s) s = strings.ToLower(s)
return s return s
} }
// check interface
var _ Scraper = ScrapeGlobalVariables{}

View File

@ -79,6 +79,11 @@ func (ScrapeHeartbeat) Help() string {
return "Collect from heartbeat" return "Collect from heartbeat"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeHeartbeat) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable) query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable)
@ -126,3 +131,6 @@ func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometh
return nil return nil
} }
// check interface
var _ Scraper = ScrapeHeartbeat{}

View File

@ -63,6 +63,11 @@ func (ScrapeAutoIncrementColumns) Help() string {
return "Collect auto_increment columns and max values from information_schema" return "Collect auto_increment columns and max values from information_schema"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeAutoIncrementColumns) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeAutoIncrementColumns) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeAutoIncrementColumns) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
autoIncrementRows, err := db.QueryContext(ctx, infoSchemaAutoIncrementQuery) autoIncrementRows, err := db.QueryContext(ctx, infoSchemaAutoIncrementQuery)
@ -93,3 +98,6 @@ func (ScrapeAutoIncrementColumns) Scrape(ctx context.Context, db *sql.DB, ch cha
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeAutoIncrementColumns{}

View File

@ -154,6 +154,11 @@ func (ScrapeClientStat) Help() string {
return "If running with userstat=1, set to true to collect client statistics" return "If running with userstat=1, set to true to collect client statistics"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeClientStat) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeClientStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeClientStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var varName, varVal string var varName, varVal string
@ -213,3 +218,6 @@ func (ScrapeClientStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- promet
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeClientStat{}

View File

@ -70,6 +70,11 @@ func (ScrapeInnodbCmp) Help() string {
return "Collect metrics from information_schema.innodb_cmp" return "Collect metrics from information_schema.innodb_cmp"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeInnodbCmp) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbCmp) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeInnodbCmp) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
informationSchemaInnodbCmpRows, err := db.QueryContext(ctx, innodbCmpQuery) informationSchemaInnodbCmpRows, err := db.QueryContext(ctx, innodbCmpQuery)
@ -84,7 +89,6 @@ func (ScrapeInnodbCmp) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometh
) )
for informationSchemaInnodbCmpRows.Next() { for informationSchemaInnodbCmpRows.Next() {
if err := informationSchemaInnodbCmpRows.Scan( if err := informationSchemaInnodbCmpRows.Scan(
&page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time, &page_size, &compress_ops, &compress_ops_ok, &compress_time, &uncompress_ops, &uncompress_time,
); err != nil { ); err != nil {
@ -96,8 +100,10 @@ func (ScrapeInnodbCmp) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometh
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compress_time, page_size)
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompress_ops, page_size)
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompress_time, page_size)
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeInnodbCmp{}

View File

@ -65,6 +65,11 @@ func (ScrapeInnodbCmpMem) Help() string {
return "Collect metrics from information_schema.innodb_cmpmem" return "Collect metrics from information_schema.innodb_cmpmem"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeInnodbCmpMem) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbCmpMem) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeInnodbCmpMem) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
informationSchemaInnodbCmpMemRows, err := db.QueryContext(ctx, innodbCmpMemQuery) informationSchemaInnodbCmpMemRows, err := db.QueryContext(ctx, innodbCmpMemQuery)
@ -89,7 +94,9 @@ func (ScrapeInnodbCmpMem) Scrape(ctx context.Context, db *sql.DB, ch chan<- prom
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemPagesFree, prometheus.CounterValue, pages_free, page_size, buffer_pool)
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationOps, prometheus.CounterValue, relocation_ops, page_size, buffer_pool)
ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, (relocation_time / 1000), page_size, buffer_pool) ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpMemRelocationTime, prometheus.CounterValue, (relocation_time / 1000), page_size, buffer_pool)
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeInnodbCmpMem{}

View File

@ -75,6 +75,11 @@ func (ScrapeInnodbMetrics) Help() string {
return "Collect metrics from information_schema.innodb_metrics" return "Collect metrics from information_schema.innodb_metrics"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeInnodbMetrics) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInnodbMetrics) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeInnodbMetrics) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
innodbMetricsRows, err := db.QueryContext(ctx, infoSchemaInnodbMetricsQuery) innodbMetricsRows, err := db.QueryContext(ctx, infoSchemaInnodbMetricsQuery)
@ -164,3 +169,6 @@ func (ScrapeInnodbMetrics) Scrape(ctx context.Context, db *sql.DB, ch chan<- pro
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeInnodbMetrics{}

View File

@ -66,6 +66,11 @@ func (ScrapeInfoSchemaInnodbTablespaces) Help() string {
return "Collect metrics from information_schema.innodb_sys_tablespaces" return "Collect metrics from information_schema.innodb_sys_tablespaces"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeInfoSchemaInnodbTablespaces) Version() float64 {
	return 5.7
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeInfoSchemaInnodbTablespaces) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeInfoSchemaInnodbTablespaces) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
tablespacesRows, err := db.QueryContext(ctx, innodbTablespacesQuery) tablespacesRows, err := db.QueryContext(ctx, innodbTablespacesQuery)
@ -113,3 +118,6 @@ func (ScrapeInfoSchemaInnodbTablespaces) Scrape(ctx context.Context, db *sql.DB,
return nil return nil
} }
// check interface
var _ Scraper = ScrapeInfoSchemaInnodbTablespaces{}

View File

@ -170,6 +170,11 @@ func (ScrapeProcesslist) Help() string {
return "Collect current thread state counts from the information_schema.processlist" return "Collect current thread state counts from the information_schema.processlist"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeProcesslist) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
processQuery := fmt.Sprintf( processQuery := fmt.Sprintf(
@ -261,3 +266,6 @@ func deriveThreadState(command string, state string) string {
} }
return "other" return "other"
} }
// check interface
var _ Scraper = ScrapeProcesslist{}

View File

@ -112,6 +112,11 @@ func (ScrapeQueryResponseTime) Help() string {
return "Collect query response time distribution if query_response_time_stats is ON." return "Collect query response time distribution if query_response_time_stats is ON."
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeQueryResponseTime) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeQueryResponseTime) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeQueryResponseTime) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var queryStats uint8 var queryStats uint8
@ -135,3 +140,6 @@ func (ScrapeQueryResponseTime) Scrape(ctx context.Context, db *sql.DB, ch chan<-
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeQueryResponseTime{}

View File

@ -90,6 +90,11 @@ func (ScrapeTableSchema) Help() string {
return "Collect metrics from information_schema.tables" return "Collect metrics from information_schema.tables"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeTableSchema) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var dbList []string var dbList []string
@ -177,3 +182,6 @@ func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
return nil return nil
} }
// check interface
var _ Scraper = ScrapeTableSchema{}

View File

@ -65,6 +65,11 @@ func (ScrapeTableStat) Help() string {
return "If running with userstat=1, set to true to collect table statistics" return "If running with userstat=1, set to true to collect table statistics"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeTableStat) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeTableStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeTableStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var varName, varVal string var varName, varVal string
@ -118,3 +123,6 @@ func (ScrapeTableStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometh
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeTableStat{}

View File

@ -150,6 +150,11 @@ func (ScrapeUserStat) Help() string {
return "If running with userstat=1, set to true to collect user statistics" return "If running with userstat=1, set to true to collect user statistics"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeUserStat) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeUserStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeUserStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var varName, varVal string var varName, varVal string
@ -208,3 +213,6 @@ func (ScrapeUserStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- promethe
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeUserStat{}

View File

@ -161,6 +161,11 @@ func (ScrapePerfEventsStatements) Help() string {
return "Collect metrics from performance_schema.events_statements_summary_by_digest" return "Collect metrics from performance_schema.events_statements_summary_by_digest"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfEventsStatements) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
perfQuery := fmt.Sprintf( perfQuery := fmt.Sprintf(
@ -241,3 +246,6 @@ func (ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch cha
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfEventsStatements{}

View File

@ -54,6 +54,11 @@ func (ScrapePerfEventsWaits) Help() string {
return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name" return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfEventsWaits) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfEventsWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfEventsWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds. // Timers here are returned in picoseconds.
@ -85,3 +90,6 @@ func (ScrapePerfEventsWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- p
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfEventsWaits{}

View File

@ -63,6 +63,11 @@ func (ScrapePerfFileEvents) Help() string {
return "Collect metrics from performance_schema.file_summary_by_event_name" return "Collect metrics from performance_schema.file_summary_by_event_name"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfFileEvents) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfFileEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfFileEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds. // Timers here are returned in picoseconds.
@ -122,3 +127,6 @@ func (ScrapePerfFileEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- pr
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfFileEvents{}

View File

@ -73,6 +73,11 @@ func (ScrapePerfFileInstances) Help() string {
return "Collect metrics from performance_schema.file_summary_by_instance" return "Collect metrics from performance_schema.file_summary_by_instance"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfFileInstances) Version() float64 {
	return 5.5
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds. // Timers here are returned in picoseconds.
@ -118,3 +123,6 @@ func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<-
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfFileInstances{}

View File

@ -57,6 +57,11 @@ func (ScrapePerfIndexIOWaits) Help() string {
return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage" return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfIndexIOWaits) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfIndexIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfIndexIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaIndexWaitsRows, err := db.QueryContext(ctx, perfIndexIOWaitsQuery) perfSchemaIndexWaitsRows, err := db.QueryContext(ctx, perfIndexIOWaitsQuery)
@ -120,3 +125,6 @@ func (ScrapePerfIndexIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<-
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfIndexIOWaits{}

View File

@ -63,6 +63,11 @@ func (ScrapePerfReplicationGroupMemberStats) Help() string {
return "Collect metrics from performance_schema.replication_group_member_stats" return "Collect metrics from performance_schema.replication_group_member_stats"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfReplicationGroupMemberStats) Version() float64 {
	return 5.7
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfReplicationGroupMemberStats) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfReplicationGroupMemberStats) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
perfReplicationGroupMemeberStatsRows, err := db.QueryContext(ctx, perfReplicationGroupMemeberStatsQuery) perfReplicationGroupMemeberStatsRows, err := db.QueryContext(ctx, perfReplicationGroupMemeberStatsQuery)
@ -103,3 +108,6 @@ func (ScrapePerfReplicationGroupMemberStats) Scrape(ctx context.Context, db *sql
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfReplicationGroupMemberStats{}

View File

@ -58,6 +58,11 @@ func (ScrapePerfTableIOWaits) Help() string {
return "Collect metrics from performance_schema.table_io_waits_summary_by_table" return "Collect metrics from performance_schema.table_io_waits_summary_by_table"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfTableIOWaits) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfTableIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfTableIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaTableWaitsRows, err := db.QueryContext(ctx, perfTableIOWaitsQuery) perfSchemaTableWaitsRows, err := db.QueryContext(ctx, perfTableIOWaitsQuery)
@ -114,3 +119,6 @@ func (ScrapePerfTableIOWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<-
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfTableIOWaits{}

View File

@ -87,6 +87,11 @@ func (ScrapePerfTableLockWaits) Help() string {
return "Collect metrics from performance_schema.table_lock_waits_summary_by_table" return "Collect metrics from performance_schema.table_lock_waits_summary_by_table"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapePerfTableLockWaits) Version() float64 {
	return 5.6
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfTableLockWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapePerfTableLockWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaTableLockWaitsRows, err := db.QueryContext(ctx, perfTableLockWaitsQuery) perfSchemaTableLockWaitsRows, err := db.QueryContext(ctx, perfTableLockWaitsQuery)
@ -230,3 +235,6 @@ func (ScrapePerfTableLockWaits) Scrape(ctx context.Context, db *sql.DB, ch chan<
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapePerfTableLockWaits{}

View File

@ -25,9 +25,14 @@ import (
type Scraper interface { type Scraper interface {
// Name of the Scraper. Should be unique. // Name of the Scraper. Should be unique.
Name() string Name() string
// Help describes the role of the Scraper. // Help describes the role of the Scraper.
// Example: "Collect from SHOW ENGINE INNODB STATUS" // Example: "Collect from SHOW ENGINE INNODB STATUS"
Help() string Help() string
// Version of MySQL from which scraper is available.
Version() float64
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error
} }

View File

@ -55,6 +55,11 @@ func (ScrapeSlaveHosts) Help() string {
return "Scrape information from 'SHOW SLAVE HOSTS'" return "Scrape information from 'SHOW SLAVE HOSTS'"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeSlaveHosts) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeSlaveHosts) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeSlaveHosts) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
slaveHostsRows, err := db.QueryContext(ctx, slaveHostsQuery) slaveHostsRows, err := db.QueryContext(ctx, slaveHostsQuery)
@ -110,3 +115,6 @@ func (ScrapeSlaveHosts) Scrape(ctx context.Context, db *sql.DB, ch chan<- promet
return nil return nil
} }
// check interface
var _ Scraper = ScrapeSlaveHosts{}

View File

@ -62,6 +62,11 @@ func (ScrapeSlaveStatus) Help() string {
return "Collect from SHOW SLAVE STATUS" return "Collect from SHOW SLAVE STATUS"
} }
// Version returns the minimal MySQL version (major.minor) from which this
// scraper is available; the exporter skips the scraper on older servers.
func (ScrapeSlaveStatus) Version() float64 {
	return 5.1
}
// Scrape collects data from database connection and sends it over channel as prometheus metric. // Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeSlaveStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { func (ScrapeSlaveStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
var ( var (
@ -129,3 +134,6 @@ func (ScrapeSlaveStatus) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
} }
return nil return nil
} }
// check interface
var _ Scraper = ScrapeSlaveStatus{}