Introduce Scraper interface
Signed-off-by: Kamil Dziedzic <arvenil@klecza.pl>
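The hunks below convert each standalone `ScrapeXxx` function in the collector package into a small struct that satisfies a shared Scraper interface. The interface definition itself is not among the hunks shown; judging from the three methods every collector now implements, it presumably looks roughly like this (a sketch, not the verbatim upstream source):

```go
package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

// Scraper is the presumed common abstraction: a unique name (used to build
// the "collect.<name>" duration and error labels), a help string, and a
// Scrape method that reads from the database and emits metrics on the channel.
type Scraper interface {
	Name() string
	Help() string
	Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
}
```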
@@ -38,7 +38,20 @@ var (
 )

 // ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
-func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeBinlogSize struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeBinlogSize) Name() string {
+	return "binlog_size"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeBinlogSize) Help() string {
+	return "Collect the current size of all registered binlog files"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeBinlogSize) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var logBin uint8
 	err := db.QueryRow(logbinQuery).Scan(&logBin)
 	if err != nil {
@@ -27,7 +27,7 @@ func TestScrapeBinlogSize(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeBinlogSize(db, ch); err != nil {
+		if err = (ScrapeBinlogSize{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -19,7 +19,20 @@ const (
 )

 // ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
-func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeEngineInnodbStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeEngineInnodbStatus) Name() string {
+	return "engine_innodb_status"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeEngineInnodbStatus) Help() string {
+	return "Collect from SHOW ENGINE INNODB STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeEngineInnodbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	rows, err := db.Query(engineInnodbStatusQuery)
 	if err != nil {
 		return err
@@ -140,7 +140,7 @@ END OF INNODB MONITOR OUTPUT

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
+		if err = (ScrapeEngineInnodbStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -16,26 +16,21 @@ const (
 	engineTokudbStatusQuery = `SHOW ENGINE TOKUDB STATUS`
 )

-func sanitizeTokudbMetric(metricName string) string {
-	replacements := map[string]string{
-		">": "",
-		",": "",
-		":": "",
-		"(": "",
-		")": "",
-		" ": "_",
-		"-": "_",
-		"+": "and",
-		"/": "and",
-	}
-	for r := range replacements {
-		metricName = strings.Replace(metricName, r, replacements[r], -1)
-	}
-	return metricName
+// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
+type ScrapeEngineTokudbStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeEngineTokudbStatus) Name() string {
+	return "engine_tokudb_status"
 }

-// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
-func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+// Help describes the role of the Scraper.
+func (ScrapeEngineTokudbStatus) Help() string {
+	return "Collect from SHOW ENGINE TOKUDB STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeEngineTokudbStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	tokudbRows, err := db.Query(engineTokudbStatusQuery)
 	if err != nil {
 		return err
@@ -60,3 +55,21 @@ func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
 	}
 	return nil
 }
+
+func sanitizeTokudbMetric(metricName string) string {
+	replacements := map[string]string{
+		">": "",
+		",": "",
+		":": "",
+		"(": "",
+		")": "",
+		" ": "_",
+		"-": "_",
+		"+": "and",
+		"/": "and",
+	}
+	for r := range replacements {
+		metricName = strings.Replace(metricName, r, replacements[r], -1)
+	}
+	return metricName
+}
@@ -44,7 +44,7 @@ func TestScrapeEngineTokudbStatus(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
+		if err = (ScrapeEngineTokudbStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -4,6 +4,7 @@ import (
 	"database/sql"
 	"fmt"
 	"strings"
+	"sync"
 	"time"

 	_ "github.com/go-sql-driver/mysql"
@@ -28,7 +29,7 @@ const (
 	upQuery = `SELECT 1`
 )

-// Metric descriptors.
+// Tunable flags.
 var (
 	exporterLockTimeout = kingpin.Flag(
 		"exporter.lock_wait_timeout",
@@ -38,7 +39,10 @@ var (
 		"exporter.log_slow_filter",
 		"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
 	).Default("false").Bool()
+)

+// Metric descriptors.
+var (
 	scrapeDurationDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"),
 		"Collector time duration.",
@@ -46,42 +50,10 @@ var (
 	)
 )

-// Collect defines which metrics we should collect
-type Collect struct {
-	Processlist             bool
-	TableSchema             bool
-	InnodbTablespaces       bool
-	InnodbMetrics           bool
-	GlobalStatus            bool
-	GlobalVariables         bool
-	SlaveStatus             bool
-	AutoIncrementColumns    bool
-	BinlogSize              bool
-	PerfTableIOWaits        bool
-	PerfIndexIOWaits        bool
-	PerfTableLockWaits      bool
-	PerfEventsStatements    bool
-	PerfEventsWaits         bool
-	PerfFileEvents          bool
-	PerfFileInstances       bool
-	PerfRepGroupMemberStats bool
-	UserStat                bool
-	ClientStat              bool
-	TableStat               bool
-	InnodbCmp               bool
-	InnodbCmpMem            bool
-	QueryResponseTime       bool
-	EngineTokudbStatus      bool
-	EngineInnodbStatus      bool
-	Heartbeat               bool
-	HeartbeatDatabase       string
-	HeartbeatTable          string
-}
-
 // Exporter collects MySQL metrics. It implements prometheus.Collector.
 type Exporter struct {
 	dsn          string
-	collect      Collect
+	scrapers     []Scraper
 	error        prometheus.Gauge
 	totalScrapes prometheus.Counter
 	scrapeErrors *prometheus.CounterVec
@@ -89,7 +61,7 @@ type Exporter struct {
 }

 // New returns a new MySQL exporter for the provided DSN.
-func New(dsn string, collect Collect) *Exporter {
+func New(dsn string, scrapers []Scraper) *Exporter {
 	// Setup extra params for the DSN, default to having a lock timeout.
 	dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}

@@ -106,7 +78,7 @@ func New(dsn string, collect Collect) *Exporter {

 	return &Exporter{
 		dsn:      dsn,
-		collect:  collect,
+		scrapers: scrapers,
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: exporter,
@@ -203,237 +175,20 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {

 	ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")

-	if e.collect.GlobalStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeGlobalStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.global_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.global_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.global_status")
-	}
-	if e.collect.GlobalVariables {
-		scrapeTime = time.Now()
-		if err = ScrapeGlobalVariables(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.global_variables:", err)
-			e.scrapeErrors.WithLabelValues("collect.global_variables").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.global_variables")
-	}
-	if e.collect.SlaveStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeSlaveStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.slave_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.slave_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.slave_status")
-	}
-	if e.collect.Processlist {
-		scrapeTime = time.Now()
-		if err = ScrapeProcesslist(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.processlist:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.processlist").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.processlist")
-	}
-	if e.collect.TableSchema {
-		scrapeTime = time.Now()
-		if err = ScrapeTableSchema(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.tables:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.tables").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.tables")
-	}
-	if e.collect.InnodbTablespaces {
-		scrapeTime = time.Now()
-		if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodb_sys_tablespaces:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_sys_tablespaces").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_sys_tablespaces")
-	}
-	if e.collect.InnodbMetrics {
-		if err = ScrapeInnodbMetrics(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodb_metrics:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_metrics").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodb_metrics")
-	}
-	if e.collect.AutoIncrementColumns {
-		scrapeTime = time.Now()
-		if err = ScrapeAutoIncrementColumns(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.auto_increment.columns:", err)
-			e.scrapeErrors.WithLabelValues("collect.auto_increment.columns").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.auto_increment.columns")
-	}
-	if e.collect.BinlogSize {
-		scrapeTime = time.Now()
-		if err = ScrapeBinlogSize(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.binlog_size:", err)
-			e.scrapeErrors.WithLabelValues("collect.binlog_size").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.binlog_size")
-	}
-	if e.collect.PerfTableIOWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfTableIOWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.tableiowaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.tableiowaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.tableiowaits")
-	}
-	if e.collect.PerfIndexIOWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfIndexIOWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.indexiowaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.indexiowaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.indexiowaits")
-	}
-	if e.collect.PerfTableLockWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfTableLockWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.tablelocks:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.tablelocks").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.tablelocks")
-	}
-	if e.collect.PerfEventsStatements {
-		scrapeTime = time.Now()
-		if err = ScrapePerfEventsStatements(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.eventsstatements:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.eventsstatements").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.eventsstatements")
-	}
-	if e.collect.PerfEventsWaits {
-		scrapeTime = time.Now()
-		if err = ScrapePerfEventsWaits(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.eventswaits:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.eventswaits").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.eventswaits")
-	}
-	if e.collect.PerfFileEvents {
-		scrapeTime = time.Now()
-		if err = ScrapePerfFileEvents(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.file_events:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.file_events").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.file_events")
-	}
-	if e.collect.PerfFileInstances {
-		scrapeTime = time.Now()
-		if err = ScrapePerfFileInstances(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.perf_schema.file_instances:", err)
-			e.scrapeErrors.WithLabelValues("collect.perf_schema.file_instances").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.perf_schema.file_instances")
-	}
-	if e.collect.PerfRepGroupMemberStats {
-		scrapeTime = time.Now()
-		if err = ScrapeReplicationGroupMemberStats(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.replication_group_member_stats:", err)
-			e.scrapeErrors.WithLabelValues("collect.replication_group_member_stats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.replication_group_member_stats")
-	}
-	if e.collect.UserStat {
-		scrapeTime = time.Now()
-		if err = ScrapeUserStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.userstats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.userstats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.userstats")
-	}
-	if e.collect.InnodbCmp {
-		scrapeTime = time.Now()
-		if err = ScrapeInnodbCmp(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodbcmp:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodbcmp").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodbcmp")
-	}
-	if e.collect.InnodbCmpMem {
-		scrapeTime = time.Now()
-		if err = ScrapeInnodbCmpMem(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.innodbcmpmem:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.innodbcmpmem").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.innodbcmpmem")
-	}
-	if e.collect.ClientStat {
-		scrapeTime = time.Now()
-		if err = ScrapeClientStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.clientstats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.clientstats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.clientstats")
-	}
-	if e.collect.TableStat {
-		scrapeTime = time.Now()
-		if err = ScrapeTableStat(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.tablestats:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.tablestats").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.tablestats")
-	}
-	if e.collect.QueryResponseTime {
-		scrapeTime = time.Now()
-		if err = ScrapeQueryResponseTime(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.info_schema.query_response_time:", err)
-			e.scrapeErrors.WithLabelValues("collect.info_schema.query_response_time").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.query_response_time")
-	}
-	if e.collect.EngineTokudbStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeEngineTokudbStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.engine_tokudb_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.engine_tokudb_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_tokudb_status")
-	}
-	if e.collect.EngineInnodbStatus {
-		scrapeTime = time.Now()
-		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
-			log.Errorln("Error scraping for collect.engine_innodb_status:", err)
-			e.scrapeErrors.WithLabelValues("collect.engine_innodb_status").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_innodb_status")
-	}
-	if e.collect.Heartbeat {
-		scrapeTime = time.Now()
-		if err = ScrapeHeartbeat(db, ch, e.collect.HeartbeatDatabase, e.collect.HeartbeatTable); err != nil {
-			log.Errorln("Error scraping for collect.heartbeat:", err)
-			e.scrapeErrors.WithLabelValues("collect.heartbeat").Inc()
-			e.error.Set(1)
-		}
-		ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.heartbeat")
+	wg := &sync.WaitGroup{}
+	defer wg.Wait()
+	for _, scraper := range e.scrapers {
+		wg.Add(1)
+		go func(scraper Scraper) {
+			defer wg.Done()
+			label := "collect." + scraper.Name()
+			scrapeTime := time.Now()
+			if err := scraper.Scrape(db, ch); err != nil {
+				log.Errorln("Error scraping for "+label+":", err)
+				e.scrapeErrors.WithLabelValues(label).Inc()
+				e.error.Set(1)
+			}
+			ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
+		}(scraper)
 	}
 }
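The rewritten scrape method above replaces the long chain of `if e.collect.Xxx { ... }` blocks with one goroutine per registered scraper; the `sync.WaitGroup` with `defer wg.Wait()` guarantees that scrape does not return until every scraper has finished sending on the metrics channel. A minimal, self-contained illustration of that fan-out pattern (illustrative only, not code from the commit):

```go
package main

import (
	"fmt"
	"sync"
)

// fanOut starts one goroutine per worker and closes the results channel only
// after every worker has finished, mirroring the wg.Add / wg.Done / wg.Wait
// structure of the new Exporter.scrape loop.
func fanOut(workers []func() string) <-chan string {
	results := make(chan string)
	go func() {
		wg := &sync.WaitGroup{}
		for _, w := range workers {
			wg.Add(1)
			go func(w func() string) {
				defer wg.Done()
				results <- w()
			}(w)
		}
		wg.Wait()
		close(results)
	}()
	return results
}

func main() {
	// Hypothetical worker names, standing in for scraper Name() values.
	workers := []func() string{
		func() string { return "global_status" },
		func() string { return "binlog_size" },
	}
	for name := range fanOut(workers) {
		fmt.Println("scraped:", name)
	}
}
```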
@@ -15,8 +15,8 @@ func TestExporter(t *testing.T) {
 		t.Skip("-short is passed, skipping test")
 	}

-	exporter := New(dsn, Collect{
-		GlobalStatus: true,
+	exporter := New(dsn, []Scraper{
+		ScrapeGlobalStatus{},
 	})

 	convey.Convey("Metrics describing", t, func() {
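With New now accepting a []Scraper instead of a Collect struct, callers opt into collectors by listing scraper values, exactly as the test above does. A hypothetical wiring sketch (the DSN, the chosen scrapers, and the collector package import path are placeholders; the main-program changes are not part of the hunks shown):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	// Hypothetical selection: each value implements the Scraper interface.
	scrapers := []collector.Scraper{
		collector.ScrapeGlobalStatus{},
		collector.ScrapeGlobalVariables{},
		collector.ScrapeBinlogSize{},
	}
	exporter := collector.New("user:password@(localhost:3306)/", scrapers)
	prometheus.MustRegister(exporter)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9104", nil))
}
```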
@@ -11,7 +11,7 @@ import (
 )

 const (
-	// Scrape query
+	// Scrape query.
 	globalStatusQuery = `SHOW GLOBAL STATUS`
 	// Subsystem.
 	globalStatus = "global_status"
@@ -20,6 +20,7 @@ const (
 // Regexp to match various groups of status vars.
 var globalStatusRE = regexp.MustCompile(`^(com|handler|connection_errors|innodb_buffer_pool_pages|innodb_rows|performance_schema)_(.*)$`)

+// Metric descriptors.
 var (
 	globalCommandsDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, globalStatus, "commands_total"),
@@ -59,7 +60,20 @@ var (
 )

 // ScrapeGlobalStatus collects from `SHOW GLOBAL STATUS`.
-func ScrapeGlobalStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeGlobalStatus struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeGlobalStatus) Name() string {
+	return globalStatus
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeGlobalStatus) Help() string {
+	return "Collect from SHOW GLOBAL STATUS"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeGlobalStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	globalStatusRows, err := db.Query(globalStatusQuery)
 	if err != nil {
 		return err
@@ -38,7 +38,7 @@ func TestScrapeGlobalStatus(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeGlobalStatus(db, ch); err != nil {
+		if err = (ScrapeGlobalStatus{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -19,7 +19,20 @@ const (
 )

 // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`.
-func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeGlobalVariables struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeGlobalVariables) Name() string {
+	return globalVariables
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeGlobalVariables) Help() string {
+	return "Collect from SHOW GLOBAL VARIABLES"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeGlobalVariables) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	globalVariablesRows, err := db.Query(globalVariablesQuery)
 	if err != nil {
 		return err
@@ -37,7 +37,7 @@ func TestScrapeGlobalVariables(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeGlobalVariables(db, ch); err != nil {
+		if err = (ScrapeGlobalVariables{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -8,6 +8,7 @@ import (
 	"strconv"

 	"github.com/prometheus/client_golang/prometheus"
+	"gopkg.in/alecthomas/kingpin.v2"
 )

 const (
@@ -20,6 +21,17 @@ const (
 	heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `%s`.`%s`"
 )

+var (
+	collectHeartbeatDatabase = kingpin.Flag(
+		"collect.heartbeat.database",
+		"Database from where to collect heartbeat data",
+	).Default("heartbeat").String()
+	collectHeartbeatTable = kingpin.Flag(
+		"collect.heartbeat.table",
+		"Table from where to collect heartbeat data",
+	).Default("heartbeat").String()
+)
+
 // Metric descriptors.
 var (
 	HeartbeatStoredDesc = prometheus.NewDesc(
@@ -41,8 +53,21 @@ var (
 // ts varchar(26) NOT NULL,
 // server_id int unsigned NOT NULL PRIMARY KEY,
 // );
-func ScrapeHeartbeat(db *sql.DB, ch chan<- prometheus.Metric, collectDatabase, collectTable string) error {
-	query := fmt.Sprintf(heartbeatQuery, collectDatabase, collectTable)
+type ScrapeHeartbeat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeHeartbeat) Name() string {
+	return "heartbeat"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeHeartbeat) Help() string {
+	return "Collect from heartbeat"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeHeartbeat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+	query := fmt.Sprintf(heartbeatQuery, *collectHeartbeatDatabase, *collectHeartbeatTable)
 	heartbeatRows, err := db.Query(query)
 	if err != nil {
 		return err
@@ -7,9 +7,18 @@ import (
 	dto "github.com/prometheus/client_model/go"
 	"github.com/smartystreets/goconvey/convey"
 	"gopkg.in/DATA-DOG/go-sqlmock.v1"
+	"gopkg.in/alecthomas/kingpin.v2"
 )

 func TestScrapeHeartbeat(t *testing.T) {
+	_, err := kingpin.CommandLine.Parse([]string{
+		"--collect.heartbeat.database", "heartbeat-test",
+		"--collect.heartbeat.table", "heartbeat-test",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	db, mock, err := sqlmock.New()
 	if err != nil {
 		t.Fatalf("error opening a stub database connection: %s", err)
@@ -19,13 +28,11 @@ func TestScrapeHeartbeat(t *testing.T) {
 	columns := []string{"UNIX_TIMESTAMP(ts)", "UNIX_TIMESTAMP(NOW(6))", "server_id"}
 	rows := sqlmock.NewRows(columns).
 		AddRow("1487597613.001320", "1487598113.448042", 1)
-	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat`.`heartbeat`")).WillReturnRows(rows)
+	mock.ExpectQuery(sanitizeQuery("SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(NOW(6)), server_id from `heartbeat-test`.`heartbeat-test`")).WillReturnRows(rows)

 	ch := make(chan prometheus.Metric)
 	go func() {
-		database := "heartbeat"
-		table := "heartbeat"
-		if err = ScrapeHeartbeat(db, ch, database, table); err != nil {
+		if err = (ScrapeHeartbeat{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -22,6 +22,7 @@ const infoSchemaAutoIncrementQuery = `
 	WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL
 `

+// Metric descriptors.
 var (
 	globalInfoSchemaAutoIncrementDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "auto_increment_column"),
@@ -36,7 +37,20 @@ var (
 )

 // ScrapeAutoIncrementColumns collects auto_increment column information.
-func ScrapeAutoIncrementColumns(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeAutoIncrementColumns struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeAutoIncrementColumns) Name() string {
+	return "auto_increment.columns"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeAutoIncrementColumns) Help() string {
+	return "Collect auto_increment columns and max values from information_schema"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeAutoIncrementColumns) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery)
 	if err != nil {
 		return err
@@ -128,7 +128,20 @@ var (
 )

 // ScrapeClientStat collects from `information_schema.client_statistics`.
-func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeClientStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeClientStat) Name() string {
+	return "info_schema.clientstats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeClientStat) Help() string {
+	return "If running with userstat=1, set to true to collect client statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeClientStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var varName, varVal string
 	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 	if err != nil {
@@ -26,7 +26,7 @@ func TestScrapeClientStat(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeClientStat(db, ch); err != nil {
+		if err = (ScrapeClientStat{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -14,6 +14,7 @@ const innodbCmpQuery = `
 	FROM information_schema.innodb_cmp
 `

+// Metric descriptors.
 var (
 	infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
@@ -43,7 +44,20 @@ var (
 )

 // ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
-func ScrapeInnodbCmp(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeInnodbCmp struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInnodbCmp) Name() string {
+	return informationSchema + ".innodb_cmp"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInnodbCmp) Help() string {
+	return "Collect metrics from information_schema.innodb_cmp"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {

 	informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
 	if err != nil {
@@ -23,7 +23,7 @@ func TestScrapeInnodbCmp(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeInnodbCmp(db, ch); err != nil {
+		if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -14,6 +14,7 @@ const innodbCmpMemQuery = `
 	FROM information_schema.innodb_cmpmem
 `

+// Metric descriptors.
 var (
 	infoSchemaInnodbCmpMemPagesRead = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
@@ -38,7 +39,20 @@ var (
 )

 // ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
-func ScrapeInnodbCmpMem(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeInnodbCmpMem struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInnodbCmpMem) Name() string {
+	return informationSchema + ".innodb_cmpmem"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInnodbCmpMem) Help() string {
+	return "Collect metrics from information_schema.innodb_cmpmem"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInnodbCmpMem) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {

 	informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery)
 	if err != nil {
@@ -23,7 +23,7 @@ func TestScrapeInnodbCmpMem(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeInnodbCmpMem(db, ch); err != nil {
+		if err = (ScrapeInnodbCmpMem{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -49,7 +49,20 @@ var (
 )

 // ScrapeInnodbMetrics collects from `information_schema.innodb_metrics`.
-func ScrapeInnodbMetrics(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeInnodbMetrics struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInnodbMetrics) Name() string {
+	return informationSchema + ".innodb_metrics"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInnodbMetrics) Help() string {
+	return "Collect metrics from information_schema.innodb_metrics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInnodbMetrics) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	innodbMetricsRows, err := db.Query(infoSchemaInnodbMetricsQuery)
 	if err != nil {
 		return err
@@ -40,7 +40,7 @@ func TestScrapeInnodbMetrics(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeInnodbMetrics(db, ch); err != nil {
+		if err = (ScrapeInnodbMetrics{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -20,6 +20,7 @@ const innodbTablespacesQuery = `
 	FROM information_schema.innodb_sys_tablespaces
 `

+// Metric descriptors.
 var (
 	infoSchemaInnodbTablesspaceInfoDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "innodb_tablespace_space_info"),
@@ -39,7 +40,20 @@ var (
 )

 // ScrapeInfoSchemaInnodbTablespaces collects from `information_schema.innodb_sys_tablespaces`.
-func ScrapeInfoSchemaInnodbTablespaces(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeInfoSchemaInnodbTablespaces struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeInfoSchemaInnodbTablespaces) Name() string {
+	return informationSchema + ".innodb_tablespaces"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeInfoSchemaInnodbTablespaces) Help() string {
+	return "Collect metrics from information_schema.innodb_sys_tablespaces"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeInfoSchemaInnodbTablespaces) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	tablespacesRows, err := db.Query(innodbTablespacesQuery)
 	if err != nil {
 		return err
@@ -24,7 +24,7 @@ func TestScrapeInfoSchemaInnodbTablespaces(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
+		if err = (ScrapeInfoSchemaInnodbTablespaces{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -20,13 +20,16 @@ const infoSchemaProcesslistQuery = `
 	  ORDER BY null
 `

+// Tunable flags.
 var (
-	// Tunable flags.
 	processlistMinTime = kingpin.Flag(
 		"collect.info_schema.processlist.min_time",
 		"Minimum time a thread must be in each state to be counted",
 	).Default("0").Int()
-	// Prometheus descriptors.
+)
+
+// Metric descriptors.
+var (
 	processlistCountDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "threads"),
 		"The number of threads (connections) split by current state.",
@@ -118,37 +121,21 @@ var (
 	}
 )

-func deriveThreadState(command string, state string) string {
-	var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1)
-	var normState = strings.Replace(strings.ToLower(state), "_", " ", -1)
-	// check if it's already a valid state
-	_, knownState := threadStateCounterMap[normState]
-	if knownState {
-		return normState
-	}
-	// check if plain mapping applies
-	mappedState, canMap := threadStateMapping[normState]
-	if canMap {
-		return mappedState
-	}
-	// check special waiting for XYZ lock
-	if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") {
-		return "waiting for lock"
-	}
-	if normCmd == "sleep" && normState == "" {
-		return "idle"
-	}
-	if normCmd == "query" {
-		return "executing"
-	}
-	if normCmd == "binlog dump" {
-		return "replication master"
-	}
-	return "other"
+// ScrapeProcesslist collects from `information_schema.processlist`.
+type ScrapeProcesslist struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeProcesslist) Name() string {
+	return informationSchema + ".processlist"
 }

-// ScrapeProcesslist collects from `information_schema.processlist`.
-func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error {
+// Help describes the role of the Scraper.
+func (ScrapeProcesslist) Help() string {
+	return "Collect current thread state counts from the information_schema.processlist"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeProcesslist) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	processQuery := fmt.Sprintf(
 		infoSchemaProcesslistQuery,
 		*processlistMinTime,
@@ -191,3 +178,32 @@ func ScrapeProcesslist(db *sql.DB, ch chan<- prometheus.Metric) error {

 	return nil
 }
+
+func deriveThreadState(command string, state string) string {
+	var normCmd = strings.Replace(strings.ToLower(command), "_", " ", -1)
+	var normState = strings.Replace(strings.ToLower(state), "_", " ", -1)
+	// check if it's already a valid state
+	_, knownState := threadStateCounterMap[normState]
+	if knownState {
+		return normState
+	}
+	// check if plain mapping applies
+	mappedState, canMap := threadStateMapping[normState]
+	if canMap {
+		return mappedState
+	}
+	// check special waiting for XYZ lock
+	if strings.Contains(normState, "waiting for") && strings.Contains(normState, "lock") {
+		return "waiting for lock"
+	}
+	if normCmd == "sleep" && normState == "" {
+		return "idle"
+	}
+	if normCmd == "query" {
+		return "executing"
+	}
+	if normCmd == "binlog dump" {
+		return "replication master"
+	}
+	return "other"
+}
@@ -86,7 +86,20 @@ func processQueryResponseTimeTable(db *sql.DB, ch chan<- prometheus.Metric, quer
 }

 // ScrapeQueryResponseTime collects from `information_schema.query_response_time`.
-func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeQueryResponseTime struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeQueryResponseTime) Name() string {
+	return "info_schema.query_response_time"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeQueryResponseTime) Help() string {
+	return "Collect query response time distribution if query_response_time_stats is ON."
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeQueryResponseTime) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var queryStats uint8
 	err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
 	if err != nil {
@@ -37,7 +37,7 @@ func TestScrapeQueryResponseTime(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeQueryResponseTime(db, ch); err != nil {
+		if err = (ScrapeQueryResponseTime{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -36,11 +36,16 @@ const (
 	`
 )

+// Tunable flags.
 var (
 	tableSchemaDatabases = kingpin.Flag(
 		"collect.info_schema.tables.databases",
 		"The list of databases to collect table stats for, or '*' for all",
 	).Default("*").String()
+)
+
+// Metric descriptors.
+var (
 	infoSchemaTablesVersionDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "table_version"),
 		"The version number of the table's .frm file",
@@ -59,7 +64,20 @@ var (
 )

 // ScrapeTableSchema collects from `information_schema.tables`.
-func ScrapeTableSchema(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeTableSchema struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeTableSchema) Name() string {
+	return informationSchema + ".tables"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeTableSchema) Help() string {
+	return "Collect metrics from information_schema.tables"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeTableSchema) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var dbList []string
 	if *tableSchemaDatabases == "*" {
 		dbListRows, err := db.Query(dbListQuery)
@@ -19,6 +19,7 @@ const tableStatQuery = `
 	FROM information_schema.table_statistics
 `

+// Metric descriptors.
 var (
 	infoSchemaTableStatsRowsReadDesc = prometheus.NewDesc(
 		prometheus.BuildFQName(namespace, informationSchema, "table_statistics_rows_read_total"),
@@ -38,7 +39,20 @@ var (
 )

 // ScrapeTableStat collects from `information_schema.table_statistics`.
-func ScrapeTableStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeTableStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeTableStat) Name() string {
+	return "info_schema.tablestats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeTableStat) Help() string {
+	return "If running with userstat=1, set to true to collect table statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeTableStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var varName, varVal string
 	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 	if err != nil {
@@ -27,7 +27,7 @@ func TestScrapeTableStat(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = ScrapeTableStat(db, ch); err != nil {
+		if err = (ScrapeTableStat{}).Scrape(db, ch); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
@@ -124,7 +124,20 @@ var (
 )

 // ScrapeUserStat collects from `information_schema.user_statistics`.
-func ScrapeUserStat(db *sql.DB, ch chan<- prometheus.Metric) error {
+type ScrapeUserStat struct{}
+
+// Name of the Scraper. Should be unique.
+func (ScrapeUserStat) Name() string {
+	return "info_schema.userstats"
+}
+
+// Help describes the role of the Scraper.
+func (ScrapeUserStat) Help() string {
+	return "If running with userstat=1, set to true to collect user statistics"
+}
+
+// Scrape collects data from database connection and sends it over channel as prometheus metric.
+func (ScrapeUserStat) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
 	var varName, varVal string
 	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
 	if err != nil {
@ -26,7 +26,7 @@ func TestScrapeUserStat(t *testing.T) {

ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeUserStat(db, ch); err != nil {
if err = (ScrapeUserStat{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
@ -54,7 +54,7 @@ const perfEventsStatementsQuery = `
LIMIT %d
`

// Tuning flags.
// Tunable flags.
var (
perfEventsStatementsLimit = kingpin.Flag(
"collect.perf_schema.eventsstatements.limit",
|
|||||||
)
|
)
|
||||||
|
|
||||||
// ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`.
|
// ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`.
|
||||||
func ScrapePerfEventsStatements(db *sql.DB, ch chan<- prometheus.Metric) error {
|
type ScrapePerfEventsStatements struct{}
|
||||||
|
|
||||||
|
// Name of the Scraper. Should be unique.
|
||||||
|
func (ScrapePerfEventsStatements) Name() string {
|
||||||
|
return "perf_schema.eventsstatements"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Help describes the role of the Scraper.
|
||||||
|
func (ScrapePerfEventsStatements) Help() string {
|
||||||
|
return "Collect metrics from performance_schema.events_statements_summary_by_digest"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scrape collects data from database connection and sends it over channel as prometheus metric.
|
||||||
|
func (ScrapePerfEventsStatements) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
|
||||||
perfQuery := fmt.Sprintf(
|
perfQuery := fmt.Sprintf(
|
||||||
perfEventsStatementsQuery,
|
perfEventsStatementsQuery,
|
||||||
*perfEventsStatementsDigestTextLimit,
|
*perfEventsStatementsDigestTextLimit,
|
||||||
|
@ -28,7 +28,20 @@ var (
)

// ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`.
func ScrapePerfEventsWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfEventsWaits struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfEventsWaits) Name() string {
return "perf_schema.eventswaits"
}

// Help describes the role of the Scraper.
func (ScrapePerfEventsWaits) Help() string {
return "Collect metrics from performance_schema.events_waits_summary_global_by_event_name"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfEventsWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds.
perfSchemaEventsWaitsRows, err := db.Query(perfEventsWaitsQuery)
if err != nil {
@ -37,7 +37,20 @@ var (
)

// ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`.
func ScrapePerfFileEvents(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfFileEvents struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfFileEvents) Name() string {
return "perf_schema.file_events"
}

// Help describes the role of the Scraper.
func (ScrapePerfFileEvents) Help() string {
return "Collect metrics from performance_schema.file_summary_by_event_name"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfFileEvents) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds.
perfSchemaFileEventsRows, err := db.Query(perfFileEventsQuery)
if err != nil {
@ -19,13 +19,16 @@ const perfFileInstancesQuery = `
where FILE_NAME REGEXP ?
`

// Metric descriptors.
// Tunable flags.
var (
performanceSchemaFileInstancesFilter = kingpin.Flag(
"collect.perf_schema.file_instances.filter",
"RegEx file_name filter for performance_schema.file_summary_by_instance",
).Default(".*").String()
)

// Metric descriptors.
var (
performanceSchemaFileInstancesRemovePrefix = kingpin.Flag(
"collect.perf_schema.file_instances.remove_prefix",
"Remove path prefix in performance_schema.file_summary_by_instance",
@ -43,8 +46,21 @@ var (
)
)

// ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`.
// ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`.
func ScrapePerfFileInstances(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfFileInstances struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfFileInstances) Name() string {
return "perf_schema.file_instances"
}

// Help describes the role of the Scraper.
func (ScrapePerfFileInstances) Help() string {
return "Collect metrics from performance_schema.file_summary_by_instance"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfFileInstances) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
// Timers here are returned in picoseconds.
perfSchemaFileInstancesRows, err := db.Query(perfFileInstancesQuery, *performanceSchemaFileInstancesFilter)
if err != nil {
@ -33,7 +33,7 @@ func TestScrapePerfFileInstances(t *testing.T) {

ch := make(chan prometheus.Metric)
go func() {
if err = ScrapePerfFileInstances(db, ch); err != nil {
if err = (ScrapePerfFileInstances{}).Scrape(db, ch); err != nil {
panic(fmt.Sprintf("error calling function on test: %s", err))
}
close(ch)
@ -31,7 +31,20 @@ var (
)

// ScrapePerfIndexIOWaits collects for `performance_schema.table_io_waits_summary_by_index_usage`.
func ScrapePerfIndexIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfIndexIOWaits struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfIndexIOWaits) Name() string {
return "perf_schema.indexiowaits"
}

// Help describes the role of the Scraper.
func (ScrapePerfIndexIOWaits) Help() string {
return "Collect metrics from performance_schema.table_io_waits_summary_by_index_usage"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfIndexIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaIndexWaitsRows, err := db.Query(perfIndexIOWaitsQuery)
if err != nil {
return err
@ -25,7 +25,7 @@ func TestScrapePerfIndexIOWaits(t *testing.T) {

ch := make(chan prometheus.Metric)
go func() {
if err = ScrapePerfIndexIOWaits(db, ch); err != nil {
if err = (ScrapePerfIndexIOWaits{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
@ -37,7 +37,20 @@ var (
)

// ScrapeReplicationGroupMemberStats collects from `performance_schema.replication_group_member_stats`.
func ScrapeReplicationGroupMemberStats(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfReplicationGroupMemberStats struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfReplicationGroupMemberStats) Name() string {
return performanceSchema + ".replication_group_member_stats"
}

// Help describes the role of the Scraper.
func (ScrapePerfReplicationGroupMemberStats) Help() string {
return "Collect metrics from performance_schema.replication_group_member_stats"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfReplicationGroupMemberStats) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
perfReplicationGroupMemeberStatsRows, err := db.Query(perfReplicationGroupMemeberStatsQuery)
if err != nil {
return err
@ -58,19 +71,19 @@ func ScrapeReplicationGroupMemberStats(db *sql.DB, ch chan<- prometheus.Metric)
return err
}
ch <- prometheus.MustNewConstMetric(
performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countTransactionsInQueue),
performanceSchemaReplicationGroupMemberStatsTransInQueueDesc, prometheus.CounterValue, float64(countTransactionsInQueue),
memberId,
)
ch <- prometheus.MustNewConstMetric(
performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countTransactionsChecked),
performanceSchemaReplicationGroupMemberStatsTransCheckedDesc, prometheus.CounterValue, float64(countTransactionsChecked),
memberId,
)
ch <- prometheus.MustNewConstMetric(
performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countConflictsDetected),
performanceSchemaReplicationGroupMemberStatsConflictsDetectedDesc, prometheus.CounterValue, float64(countConflictsDetected),
memberId,
)
ch <- prometheus.MustNewConstMetric(
performanceSchemaTableWaitsDesc, prometheus.CounterValue, float64(countTransactionsRowsValidating),
performanceSchemaReplicationGroupMemberStatsTransRowValidatingDesc, prometheus.CounterValue, float64(countTransactionsRowsValidating),
memberId,
)
}
@ -32,7 +32,20 @@ var (
)

// ScrapePerfTableIOWaits collects from `performance_schema.table_io_waits_summary_by_table`.
func ScrapePerfTableIOWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfTableIOWaits struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfTableIOWaits) Name() string {
return "perf_schema.tableiowaits"
}

// Help describes the role of the Scraper.
func (ScrapePerfTableIOWaits) Help() string {
return "Collect metrics from performance_schema.table_io_waits_summary_by_table"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfTableIOWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaTableWaitsRows, err := db.Query(perfTableIOWaitsQuery)
if err != nil {
return err
@ -61,7 +61,20 @@ var (
)

// ScrapePerfTableLockWaits collects from `performance_schema.table_lock_waits_summary_by_table`.
func ScrapePerfTableLockWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapePerfTableLockWaits struct{}

// Name of the Scraper. Should be unique.
func (ScrapePerfTableLockWaits) Name() string {
return "perf_schema.tablelocks"
}

// Help describes the role of the Scraper.
func (ScrapePerfTableLockWaits) Help() string {
return "Collect metrics from performance_schema.table_lock_waits_summary_by_table"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfTableLockWaits) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
perfSchemaTableLockWaitsRows, err := db.Query(perfTableLockWaitsQuery)
if err != nil {
return err
19 collector/scraper.go Normal file
@ -0,0 +1,19 @@
package collector

import (
"database/sql"

_ "github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
)

// Scraper is minimal interface that let's you add new prometheus metrics to mysqld_exporter.
type Scraper interface {
// Name of the Scraper. Should be unique.
Name() string
// Help describes the role of the Scraper.
// Example: "Collect from SHOW ENGINE INNODB STATUS"
Help() string
// Scrape collects data from database connection and sends it over channel as prometheus metric.
Scrape(db *sql.DB, ch chan<- prometheus.Metric) error
}
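The interface above is the whole extension surface a collector has to satisfy. As a hedged illustration, not part of this commit, a hypothetical scraper exporting a single gauge could look like the sketch below; the type ScrapeUptime, the metric name mysql_example_uptime_seconds and the query are invented for the example.

package collector

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

// ScrapeUptime is a hypothetical example of a Scraper implementation.
type ScrapeUptime struct{}

// Name of the Scraper. Should be unique.
func (ScrapeUptime) Name() string { return "example.uptime" }

// Help describes the role of the Scraper.
func (ScrapeUptime) Help() string { return "Example: collect Uptime from SHOW GLOBAL STATUS" }

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeUptime) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
	var name string
	var uptime float64
	if err := db.QueryRow("SHOW GLOBAL STATUS LIKE 'Uptime'").Scan(&name, &uptime); err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("mysql_example_uptime_seconds", "Server uptime in seconds.", nil, nil),
		prometheus.GaugeValue, uptime,
	)
	return nil
}

Such a type only becomes visible to the exporter once it is listed in the scrapers map shown further down.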
@ -36,7 +36,20 @@ func columnValue(scanArgs []interface{}, slaveCols []string, colName string) str
}

// ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`.
func ScrapeSlaveStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
type ScrapeSlaveStatus struct{}

// Name of the Scraper. Should be unique.
func (ScrapeSlaveStatus) Name() string {
return slaveStatus
}

// Help describes the role of the Scraper.
func (ScrapeSlaveStatus) Help() string {
return "Collect from SHOW SLAVE STATUS"
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeSlaveStatus) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
var (
slaveStatusRows *sql.Rows
err error
@ -23,7 +23,7 @@ func TestScrapeSlaveStatus(t *testing.T) {

ch := make(chan prometheus.Metric)
go func() {
if err = ScrapeSlaveStatus(db, ch); err != nil {
if err = (ScrapeSlaveStatus{}).Scrape(db, ch); err != nil {
t.Errorf("error calling function on test: %s", err)
}
close(ch)
@ -29,121 +29,39 @@ var (
"config.my-cnf",
"Path to .my.cnf file to read MySQL credentials from.",
).Default(path.Join(os.Getenv("HOME"), ".my.cnf")).String()
collectProcesslist = kingpin.Flag(
"collect.info_schema.processlist",
"Collect current thread state counts from the information_schema.processlist",
).Default("false").Bool()
collectTableSchema = kingpin.Flag(
"collect.info_schema.tables",
"Collect metrics from information_schema.tables",
).Default("true").Bool()
collectInnodbTablespaces = kingpin.Flag(
"collect.info_schema.innodb_tablespaces",
"Collect metrics from information_schema.innodb_sys_tablespaces",
).Default("false").Bool()
collectInnodbMetrics = kingpin.Flag(
"collect.info_schema.innodb_metrics",
"Collect metrics from information_schema.innodb_metrics",
).Default("false").Bool()
collectGlobalStatus = kingpin.Flag(
"collect.global_status",
"Collect from SHOW GLOBAL STATUS",
).Default("true").Bool()
collectGlobalVariables = kingpin.Flag(
"collect.global_variables",
"Collect from SHOW GLOBAL VARIABLES",
).Default("true").Bool()
collectSlaveStatus = kingpin.Flag(
"collect.slave_status",
"Collect from SHOW SLAVE STATUS",
).Default("true").Bool()
collectAutoIncrementColumns = kingpin.Flag(
"collect.auto_increment.columns",
"Collect auto_increment columns and max values from information_schema",
).Default("false").Bool()
collectBinlogSize = kingpin.Flag(
"collect.binlog_size",
"Collect the current size of all registered binlog files",
).Default("false").Bool()
collectPerfTableIOWaits = kingpin.Flag(
"collect.perf_schema.tableiowaits",
"Collect metrics from performance_schema.table_io_waits_summary_by_table",
).Default("false").Bool()
collectPerfIndexIOWaits = kingpin.Flag(
"collect.perf_schema.indexiowaits",
"Collect metrics from performance_schema.table_io_waits_summary_by_index_usage",
).Default("false").Bool()
collectPerfTableLockWaits = kingpin.Flag(
"collect.perf_schema.tablelocks",
"Collect metrics from performance_schema.table_lock_waits_summary_by_table",
).Default("false").Bool()
collectPerfEventsStatements = kingpin.Flag(
"collect.perf_schema.eventsstatements",
"Collect metrics from performance_schema.events_statements_summary_by_digest",
).Default("false").Bool()
collectPerfEventsWaits = kingpin.Flag(
"collect.perf_schema.eventswaits",
"Collect metrics from performance_schema.events_waits_summary_global_by_event_name",
).Default("false").Bool()
collectPerfFileEvents = kingpin.Flag(
"collect.perf_schema.file_events",
"Collect metrics from performance_schema.file_summary_by_event_name",
).Default("false").Bool()
collectPerfFileInstances = kingpin.Flag(
"collect.perf_schema.file_instances",
"Collect metrics from performance_schema.file_summary_by_instance",
).Default("false").Bool()
collectPerfRepGroupMemberStats = kingpin.Flag(
"collect.replication_group_member_stats",
"Collect metrics from performance_schema.replication_group_member_stats",
).Default("false").Bool()
collectUserStat = kingpin.Flag(
"collect.info_schema.userstats",
"If running with userstat=1, set to true to collect user statistics",
).Default("false").Bool()
collectClientStat = kingpin.Flag(
"collect.info_schema.clientstats",
"If running with userstat=1, set to true to collect client statistics",
).Default("false").Bool()
collectTableStat = kingpin.Flag(
"collect.info_schema.tablestats",
"If running with userstat=1, set to true to collect table statistics",
).Default("false").Bool()
collectQueryResponseTime = kingpin.Flag(
"collect.info_schema.query_response_time",
"Collect query response time distribution if query_response_time_stats is ON.",
).Default("false").Bool()
collectInnodbCmp = kingpin.Flag(
"collect.info_schema.innodbcmp",
"If running with innodbcmp=1, set to true to collect innodb cmp statistics",
).Default("false").Bool()
collectInnodbCmpMem = kingpin.Flag(
"collect.info_schema.innodbcmpmem",
"If running with innodbcmpmem=1, set to true to collect innodb cmpmem statistics",
).Default("false").Bool()
collectEngineTokudbStatus = kingpin.Flag(
"collect.engine_tokudb_status",
"Collect from SHOW ENGINE TOKUDB STATUS",
).Default("false").Bool()
collectEngineInnodbStatus = kingpin.Flag(
"collect.engine_innodb_status",
"Collect from SHOW ENGINE INNODB STATUS",
).Default("false").Bool()
collectHeartbeat = kingpin.Flag(
"collect.heartbeat",
"Collect from heartbeat",
).Default("false").Bool()
collectHeartbeatDatabase = kingpin.Flag(
"collect.heartbeat.database",
"Database from where to collect heartbeat data",
).Default("heartbeat").String()
collectHeartbeatTable = kingpin.Flag(
"collect.heartbeat.table",
"Table from where to collect heartbeat data",
).Default("heartbeat").String()
dsn string
)

// scrapers lists all possible collection methods and if they should be enabled by default.
var scrapers = map[collector.Scraper]bool{
collector.ScrapeGlobalStatus{}: true,
collector.ScrapeGlobalVariables{}: true,
collector.ScrapeSlaveStatus{}: true,
collector.ScrapeProcesslist{}: false,
collector.ScrapeTableSchema{}: true,
collector.ScrapeInfoSchemaInnodbTablespaces{}: false,
collector.ScrapeInnodbMetrics{}: false,
collector.ScrapeAutoIncrementColumns{}: false,
collector.ScrapeBinlogSize{}: false,
collector.ScrapePerfTableIOWaits{}: false,
collector.ScrapePerfIndexIOWaits{}: false,
collector.ScrapePerfTableLockWaits{}: false,
collector.ScrapePerfEventsStatements{}: false,
collector.ScrapePerfEventsWaits{}: false,
collector.ScrapePerfFileEvents{}: false,
collector.ScrapePerfFileInstances{}: false,
collector.ScrapePerfReplicationGroupMemberStats{}: false,
collector.ScrapeUserStat{}: false,
collector.ScrapeClientStat{}: false,
collector.ScrapeTableStat{}: false,
collector.ScrapeInnodbCmp{}: false,
collector.ScrapeInnodbCmpMem{}: false,
collector.ScrapeQueryResponseTime{}: false,
collector.ScrapeEngineTokudbStatus{}: false,
collector.ScrapeEngineInnodbStatus{}: false,
collector.ScrapeHeartbeat{}: false,
}

func parseMycnf(config interface{}) (string, error) {
var dsn string
cfg, err := ini.Load(config)
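A third-party Scraper, such as the hypothetical ScrapeUptime sketched earlier, would presumably be wired in with one extra entry in this map; the fragment below is illustrative only and not part of the commit.

var scrapers = map[collector.Scraper]bool{
	// ...entries as above...
	collector.ScrapeUptime{}: false, // hypothetical scraper, disabled by default
}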
@ -171,58 +89,29 @@ func init() {
prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}

func filter(filters map[string]bool, name string, flag bool) bool {
if len(filters) > 0 {
return flag && filters[name]
}
return flag
}

func handler(w http.ResponseWriter, r *http.Request) {
var filters map[string]bool
func newHandler(scrapers []collector.Scraper) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
filteredScrapers := scrapers
params := r.URL.Query()["collect[]"]
log.Debugln("collect query:", params)

// Check if we have some "collect[]" query parameters.
if len(params) > 0 {
filters = make(map[string]bool)
filters := make(map[string]bool)
for _, param := range params {
filters[param] = true
}
}

collect := collector.Collect{
Processlist: filter(filters, "info_schema.processlist", *collectProcesslist),
TableSchema: filter(filters, "info_schema.tables", *collectTableSchema),
InnodbTablespaces: filter(filters, "info_schema.innodb_tablespaces", *collectInnodbTablespaces),
InnodbMetrics: filter(filters, "info_schema.innodb_metrics", *collectInnodbMetrics),
GlobalStatus: filter(filters, "global_status", *collectGlobalStatus),
GlobalVariables: filter(filters, "global_variables", *collectGlobalVariables),
SlaveStatus: filter(filters, "slave_status", *collectSlaveStatus),
AutoIncrementColumns: filter(filters, "auto_increment.columns", *collectAutoIncrementColumns),
BinlogSize: filter(filters, "binlog_size", *collectBinlogSize),
PerfTableIOWaits: filter(filters, "perf_schema.tableiowaits", *collectPerfTableIOWaits),
PerfIndexIOWaits: filter(filters, "perf_schema.indexiowaits", *collectPerfIndexIOWaits),
PerfTableLockWaits: filter(filters, "perf_schema.tablelocks", *collectPerfTableLockWaits),
PerfEventsStatements: filter(filters, "perf_schema.eventsstatements", *collectPerfEventsStatements),
PerfEventsWaits: filter(filters, "perf_schema.eventswaits", *collectPerfEventsWaits),
PerfFileEvents: filter(filters, "perf_schema.file_events", *collectPerfFileEvents),
PerfFileInstances: filter(filters, "perf_schema.file_instances", *collectPerfFileInstances),
PerfRepGroupMemberStats: filter(filters, "perf_schema.replication_group_member_stats", *collectPerfRepGroupMemberStats),
UserStat: filter(filters, "info_schema.userstats", *collectUserStat),
ClientStat: filter(filters, "info_schema.clientstats", *collectClientStat),
InnodbCmp: filter(filters, "info_schema.innodbcmp", *collectInnodbCmp),
InnodbCmpMem: filter(filters, "info_schema.innodbcmpmem", *collectInnodbCmpMem),
TableStat: filter(filters, "info_schema.tablestats", *collectTableStat),
QueryResponseTime: filter(filters, "info_schema.query_response_time", *collectQueryResponseTime),
EngineTokudbStatus: filter(filters, "engine_tokudb_status", *collectEngineTokudbStatus),
EngineInnodbStatus: filter(filters, "engine_innodb_status", *collectEngineInnodbStatus),
Heartbeat: filter(filters, "heartbeat", *collectHeartbeat),
HeartbeatDatabase: *collectHeartbeatDatabase,
HeartbeatTable: *collectHeartbeatTable,
}
filteredScrapers = nil
for _, scraper := range scrapers {
if filters[scraper.Name()] {
filteredScrapers = append(filteredScrapers, scraper)
}
}
}

registry := prometheus.NewRegistry()
registry.MustRegister(collector.New(dsn, collect))
registry.MustRegister(collector.New(dsn, filteredScrapers))

gatherers := prometheus.Gatherers{
prometheus.DefaultGatherer,
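The new handler narrows the scraper set per request when "collect[]" query parameters are present and serves every enabled scraper otherwise. A standalone sketch of that selection logic follows; it mirrors newHandler but is not the exporter's code verbatim, and the names are illustrative.

package main

import "fmt"

// filterByName mirrors the collect[] selection in newHandler: with no params,
// every enabled scraper is kept; otherwise only the named ones survive.
func filterByName(enabled, params []string) []string {
	if len(params) == 0 {
		return enabled
	}
	want := make(map[string]bool, len(params))
	for _, p := range params {
		want[p] = true
	}
	var filtered []string
	for _, name := range enabled {
		if want[name] {
			filtered = append(filtered, name)
		}
	}
	return filtered
}

func main() {
	enabled := []string{"global_status", "global_variables", "slave_status"}
	fmt.Println(filterByName(enabled, nil))                       // all three scrapers
	fmt.Println(filterByName(enabled, []string{"global_status"})) // only global_status
}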
@ -231,9 +120,27 @@ func handler(w http.ResponseWriter, r *http.Request) {
// Delegate http serving to Prometheus client library, which will call collector.Collect.
h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{})
h.ServeHTTP(w, r)
}
}

func main() {
// Generate ON/OFF flags for all scrapers.
scraperFlags := map[collector.Scraper]*bool{}
for scraper, enabledByDefault := range scrapers {
defaultOn := "false"
if enabledByDefault {
defaultOn = "true"
}

f := kingpin.Flag(
"collect."+scraper.Name(),
scraper.Help(),
).Default(defaultOn).Bool()

scraperFlags[scraper] = f
}

// Parse flags.
log.AddFlags(kingpin.CommandLine)
kingpin.Version(version.Print("mysqld_exporter"))
kingpin.HelpFlag.Short('h')
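Each entry in the scrapers map therefore yields one boolean kingpin flag named "collect." plus the scraper's Name(), with the map value as its default. For the binlog_size scraper this is equivalent to the hand-written declaration the commit removes; the sketch below only illustrates that equivalence, the variable name and import path are assumptions.

package main

import "gopkg.in/alecthomas/kingpin.v2"

// What the generated flag boils down to for one scraper (sketch only).
var collectBinlogSize = kingpin.Flag(
	"collect.binlog_size", // "collect." + scraper.Name()
	"Collect the current size of all registered binlog files", // scraper.Help()
).Default("false").Bool() // default taken from the scrapers map

func main() {
	kingpin.Parse()
	_ = *collectBinlogSize
}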
@ -261,7 +168,16 @@ func main() {
}
}

http.HandleFunc(*metricPath, prometheus.InstrumentHandlerFunc("metrics", handler))
// Register only scrapers enabled by flag.
log.Infof("Enabled scrapers:")
enabledScrapers := []collector.Scraper{}
for scraper, enabled := range scraperFlags {
if *enabled {
log.Infof(" --collect.%s", scraper.Name())
enabledScrapers = append(enabledScrapers, scraper)
}
}
http.HandleFunc(*metricPath, prometheus.InstrumentHandlerFunc("metrics", newHandler(enabledScrapers)))
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write(landingPage)
})