mysqld_exporter (mirror of https://github.com/prometheus/mysqld_exporter.git)
Added metrics from SHOW ENGINE INNODB STATUS.
@@ -39,6 +39,7 @@ Name | MySQL Version | Description
 -------------------------------------------------------|---------------|------------------------------------------------------------------------------------
 collect.auto_increment.columns | 5.1 | Collect auto_increment columns and max values from information_schema.
 collect.binlog_size | 5.1 | Collect the current size of all registered binlog files
+collect.engine_innodb_status | 5.1 | Collect from SHOW ENGINE INNODB STATUS.
 collect.engine_tokudb_status | 5.6 | Collect from SHOW ENGINE TOKUDB STATUS.
 collect.global_status | 5.1 | Collect from SHOW GLOBAL STATUS (Enabled by default)
 collect.global_variables | 5.1 | Collect from SHOW GLOBAL VARIABLES (Enabled by default)
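
For context: every collector in this table is gated behind a boolean command-line flag, registered via Go's standard flag package (see the flag-registration hunk further down). Enabling the new collector on a running exporter would therefore look something like this (invocation illustrative, other flags omitted):

    mysqld_exporter -collect.engine_innodb_status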
collector/engine_innodb.go (new file, 67 lines)
@@ -0,0 +1,67 @@
// Scrape `SHOW ENGINE INNODB STATUS`.

package collector

import (
	"database/sql"
	"regexp"
	"strconv"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	// Subsystem.
	innodb = "engine_innodb"
	// Query.
	engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS`
)

// ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
func ScrapeEngineInnodbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
	rows, err := db.Query(engineInnodbStatusQuery)
	if err != nil {
		return err
	}
	defer rows.Close()

	var typeCol, nameCol, statusCol string
	// The first row should contain the necessary info; getting more than one row back is unexpected.
	if rows.Next() {
		if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil {
			return err
		}
	}

	// Example status lines the regexps below match:
	//   0 queries inside InnoDB, 0 queries in queue
	//   0 read views open inside InnoDB
	rQueries, _ := regexp.Compile(`(\d+) queries inside InnoDB, (\d+) queries in queue`)
	rViews, _ := regexp.Compile(`(\d+) read views open inside InnoDB`)

	for _, line := range strings.Split(statusCol, "\n") {
		if data := rQueries.FindStringSubmatch(line); data != nil {
			value, _ := strconv.ParseFloat(data[1], 64)
			ch <- prometheus.MustNewConstMetric(
				newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."),
				prometheus.GaugeValue,
				value,
			)
			value, _ = strconv.ParseFloat(data[2], 64)
			ch <- prometheus.MustNewConstMetric(
				newDesc(innodb, "queries_in_queue", "Queries in queue."),
				prometheus.GaugeValue,
				value,
			)
		} else if data := rViews.FindStringSubmatch(line); data != nil {
			value, _ := strconv.ParseFloat(data[1], 64)
			ch <- prometheus.MustNewConstMetric(
				newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."),
				prometheus.GaugeValue,
				value,
			)
		}
	}

	return nil
}
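
A minimal, standalone sketch (not part of the commit) of the parsing technique used above: split the Status column into lines, probe each line with the two regexps, and convert the captured integers to float64 gauge values. The sample lines come from the ROW OPERATIONS section of the test fixture below.

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

func main() {
	// Two lines in the format of the ROW OPERATIONS section of
	// SHOW ENGINE INNODB STATUS output.
	status := "661 queries inside InnoDB, 10 queries in queue\n15 read views open inside InnoDB"

	rQueries := regexp.MustCompile(`(\d+) queries inside InnoDB, (\d+) queries in queue`)
	rViews := regexp.MustCompile(`(\d+) read views open inside InnoDB`)

	for _, line := range strings.Split(status, "\n") {
		if data := rQueries.FindStringSubmatch(line); data != nil {
			inside, _ := strconv.ParseFloat(data[1], 64)
			queued, _ := strconv.ParseFloat(data[2], 64)
			fmt.Println("queries_inside_innodb =", inside) // 661
			fmt.Println("queries_in_queue =", queued)      // 10
		} else if data := rViews.FindStringSubmatch(line); data != nil {
			views, _ := strconv.ParseFloat(data[1], 64)
			fmt.Println("read_views_open_inside_innodb =", views) // 15
		}
	}
}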
collector/engine_innodb_test.go (new file, 165 lines)
@@ -0,0 +1,165 @@
package collector

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
	"gopkg.in/DATA-DOG/go-sqlmock.v1"
)

func TestScrapeEngineInnodbStatus(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	sample := `
=====================================
2016-09-14 19:04:38 0x7fed21462700 INNODB MONITOR OUTPUT
=====================================
Per second averages calculated from the last 30 seconds
-----------------
BACKGROUND THREAD
-----------------
srv_master_thread loops: 1 srv_active, 0 srv_shutdown, 49166 srv_idle
srv_master_thread log flush and writes: 49165
----------
SEMAPHORES
----------
OS WAIT ARRAY INFO: reservation count 15
OS WAIT ARRAY INFO: signal count 12
RW-shared spins 0, rounds 4, OS waits 2
RW-excl spins 0, rounds 0, OS waits 0
RW-sx spins 0, rounds 0, OS waits 0
Spin rounds per wait: 4.00 RW-shared, 0.00 RW-excl, 0.00 RW-sx
------------
TRANSACTIONS
------------
Trx id counter 67843
Purge done for trx's n:o < 55764 undo n:o < 0 state: running but idle
History list length 779
LIST OF TRANSACTIONS FOR EACH SESSION:
---TRANSACTION 422131596298608, not started
0 lock struct(s), heap size 1136, 0 row lock(s)
--------
FILE I/O
--------
I/O thread 0 state: waiting for completed aio requests (insert buffer thread)
I/O thread 1 state: waiting for completed aio requests (log thread)
I/O thread 2 state: waiting for completed aio requests (read thread)
I/O thread 3 state: waiting for completed aio requests (read thread)
I/O thread 4 state: waiting for completed aio requests (read thread)
I/O thread 5 state: waiting for completed aio requests (read thread)
I/O thread 6 state: waiting for completed aio requests (write thread)
I/O thread 7 state: waiting for completed aio requests (write thread)
I/O thread 8 state: waiting for completed aio requests (write thread)
I/O thread 9 state: waiting for completed aio requests (write thread)
Pending normal aio reads: [0, 0, 0, 0] , aio writes: [0, 0, 0, 0] ,
ibuf aio reads:, log i/o's:, sync i/o's:
Pending flushes (fsync) log: 0; buffer pool: 0
512 OS file reads, 57 OS file writes, 8 OS fsyncs
0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s
-------------------------------------
INSERT BUFFER AND ADAPTIVE HASH INDEX
-------------------------------------
Ibuf: size 1, free list len 0, seg size 2, 0 merges
merged operations:
insert 0, delete mark 0, delete 0
discarded operations:
insert 0, delete mark 0, delete 0
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
Hash table size 34673, node heap has 0 buffer(s)
0.00 hash searches/s, 0.00 non-hash searches/s
---
LOG
---
Log sequence number 37771171
Log flushed up to 37771171
Pages flushed up to 37771171
Last checkpoint at 37771162
Max checkpoint age 80826164
Checkpoint age target 78300347
Modified age 0
Checkpoint age 9
0 pending log flushes, 0 pending chkp writes
10 log i/o's done, 0.00 log i/o's/second
----------------------
BUFFER POOL AND MEMORY
----------------------
Total large memory allocated 139722752
Dictionary memory allocated 367821
Internal hash tables (constant factor + variable factor)
Adaptive hash index 2252736 (2219072 + 33664)
Page hash 139112 (buffer pool 0 only)
Dictionary cache 922589 (554768 + 367821)
File system 839328 (812272 + 27056)
Lock system 334008 (332872 + 1136)
Recovery system 0 (0 + 0)
Buffer pool size 8191
Buffer pool size, bytes 0
Free buffers 7684
Database pages 507
Old database pages 0
Modified db pages 0
Pending reads 0
Pending writes: LRU 0, flush list 0, single page 0
Pages made young 0, not young 0
0.00 youngs/s, 0.00 non-youngs/s
Pages read 473, created 34, written 36
0.00 reads/s, 0.00 creates/s, 0.00 writes/s
No buffer pool page gets since the last printout
Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s
LRU len: 507, unzip_LRU len: 0
I/O sum[0]:cur[0], unzip sum[0]:cur[0]
--------------
ROW OPERATIONS
--------------
661 queries inside InnoDB, 10 queries in queue
15 read views open inside InnoDB
0 RW transactions active inside InnoDB
Process ID=1, Main thread ID=140656308950784, state: sleeping
Number of rows inserted 0, updated 0, deleted 0, read 12
0.00 inserts/s, 0.00 updates/s, 0.00 deletes/s, 0.00 reads/s
----------------------------
END OF INNODB MONITOR OUTPUT
============================
`
	columns := []string{"Type", "Name", "Status"}
	rows := sqlmock.NewRows(columns).AddRow("InnoDB", "", sample)

	mock.ExpectQuery(sanitizeQuery(engineInnodbStatusQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeEngineInnodbStatus(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
	}()

	metricsExpected := []MetricResult{
		{labels: labelMap{}, value: 661, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: 10, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: 15, metricType: dto.MetricType_GAUGE},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range metricsExpected {
			got := readMetric(<-ch)
			convey.So(got, convey.ShouldResemble, expect)
		}
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
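
Assuming mysqld_exporter's usual `mysql` namespace and that the `newDesc` helper builds `namespace_subsystem_name` metric names (an inference; the helper itself is not shown in this diff), a scrape of the fixture above should surface gauges roughly like:

# HELP mysql_engine_innodb_queries_inside_innodb Queries inside InnoDB.
# TYPE mysql_engine_innodb_queries_inside_innodb gauge
mysql_engine_innodb_queries_inside_innodb 661
# HELP mysql_engine_innodb_queries_in_queue Queries in queue.
# TYPE mysql_engine_innodb_queries_in_queue gauge
mysql_engine_innodb_queries_in_queue 10
# HELP mysql_engine_innodb_read_views_open_inside_innodb Read views open inside InnoDB.
# TYPE mysql_engine_innodb_read_views_open_inside_innodb gauge
mysql_engine_innodb_read_views_open_inside_innodb 15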
@@ -109,9 +109,14 @@ var (
 		"If running with userstat=1, set to true to collect table statistics",
 	)
 	collectQueryResponseTime = flag.Bool("collect.info_schema.query_response_time", false,
-		"Collect query response time distribution if query_response_time_stats is ON.")
+		"Collect query response time distribution if query_response_time_stats is ON.",
+	)
 	collectEngineTokudbStatus = flag.Bool("collect.engine_tokudb_status", false,
-		"Collect from SHOW ENGINE TOKUDB STATUS")
+		"Collect from SHOW ENGINE TOKUDB STATUS",
+	)
+	collectEngineInnodbStatus = flag.Bool("collect.engine_innodb_status", false,
+		"Collect from SHOW ENGINE INNODB STATUS",
+	)
 )

 // Metric name parts.
@@ -381,6 +386,12 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
 			e.scrapeErrors.WithLabelValues("collect.engine_tokudb_status").Inc()
 		}
 	}
+	if *collectEngineInnodbStatus {
+		if err = collector.ScrapeEngineInnodbStatus(db, ch); err != nil {
+			log.Errorln("Error scraping for collect.engine_innodb_status:", err)
+			e.scrapeErrors.WithLabelValues("collect.engine_innodb_status").Inc()
+		}
+	}
 }

 func parseMycnf(config interface{}) (string, error) {