mysqld_exporter/collector/engine_innodb.go
TJ Hoplock be5dc65671 chore!: adopt log/slog, drop go-kit/log (#875)
* chore!: adopt log/slog, drop go-kit/log

Requires: prometheus/common#697

This PR includes:
- linter updates to enable the `sloglint` linter
- Go dep updates for prometheus/{client_golang,common,exporter-toolkit}
  libs
- refactorings to adopt log/slog in favor of go-kit/log (see the sketch after this commit message)

The bulk of this PR was automated by the following script, which is being
used to aid in converting the various exporters/projects to use slog:

https://gist.github.com/tjhop/49f96fb7ebbe55b12deee0b0312d8434

Builds and passes tests locally with go workspaces and up-to-date main
branch of prometheus/common.

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

* build(deps): bump prometheus/common to v0.60.0

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

---------

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>
2024-10-10 18:04:21 +02:00
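
To make the shape of this refactoring concrete, here is a minimal, self-contained sketch of the logging style the exporter moves to. The scrapeSomething function and the wiring in main are hypothetical illustrations; only the *slog.Logger parameter mirrors the actual Scrape signature in the file below, and the go-kit pattern mentioned in the comments is the one the commit says is being dropped.

package main

import (
    "log/slog"
    "os"
)

// scrapeSomething mirrors the new-style collector signature: the logger is a
// *slog.Logger from the standard library rather than a go-kit log.Logger.
// The function name and body are hypothetical.
func scrapeSomething(logger *slog.Logger) error {
    // Structured key/value logging replaces go-kit's
    // level.Debug(logger).Log("msg", ...) pattern.
    logger.Debug("scraping", "collector", "engine_innodb_status")
    return nil
}

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
    if err := scrapeSomething(logger); err != nil {
        logger.Error("scrape failed", "err", err)
    }
}

In the exporter itself the logger is constructed by shared Prometheus tooling and passed down to each scraper rather than built in main as here.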


// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `SHOW ENGINE INNODB STATUS`.

package collector

import (
    "context"
    "log/slog"
    "regexp"
    "strconv"
    "strings"

    "github.com/prometheus/client_golang/prometheus"
)

const (
    // Subsystem.
    innodb = "engine_innodb"
    // Query.
    engineInnodbStatusQuery = `SHOW ENGINE INNODB STATUS`
)

// ScrapeEngineInnodbStatus scrapes from `SHOW ENGINE INNODB STATUS`.
type ScrapeEngineInnodbStatus struct{}

// Name of the Scraper. Should be unique.
func (ScrapeEngineInnodbStatus) Name() string {
    return "engine_innodb_status"
}

// Help describes the role of the Scraper.
func (ScrapeEngineInnodbStatus) Help() string {
    return "Collect from SHOW ENGINE INNODB STATUS"
}

// Version of MySQL from which scraper is available.
func (ScrapeEngineInnodbStatus) Version() float64 {
    return 5.1
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeEngineInnodbStatus) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
    db := instance.getDB()
    rows, err := db.QueryContext(ctx, engineInnodbStatusQuery)
    if err != nil {
        return err
    }
    defer rows.Close()

    var typeCol, nameCol, statusCol string
    // The first row should contain the necessary info. If more rows are returned, it is an unknown case.
    if rows.Next() {
        if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil {
            return err
        }
    }

    // 0 queries inside InnoDB, 0 queries in queue
    // 0 read views open inside InnoDB
    rQueries, _ := regexp.Compile(`(\d+) queries inside InnoDB, (\d+) queries in queue`)
    rViews, _ := regexp.Compile(`(\d+) read views open inside InnoDB`)

    for _, line := range strings.Split(statusCol, "\n") {
        if data := rQueries.FindStringSubmatch(line); data != nil {
            value, _ := strconv.ParseFloat(data[1], 64)
            ch <- prometheus.MustNewConstMetric(
                newDesc(innodb, "queries_inside_innodb", "Queries inside InnoDB."),
                prometheus.GaugeValue,
                value,
            )
            value, _ = strconv.ParseFloat(data[2], 64)
            ch <- prometheus.MustNewConstMetric(
                newDesc(innodb, "queries_in_queue", "Queries in queue."),
                prometheus.GaugeValue,
                value,
            )
        } else if data := rViews.FindStringSubmatch(line); data != nil {
            value, _ := strconv.ParseFloat(data[1], 64)
            ch <- prometheus.MustNewConstMetric(
                newDesc(innodb, "read_views_open_inside_innodb", "Read views open inside InnoDB."),
                prometheus.GaugeValue,
                value,
            )
        }
    }

    return nil
}

// check interface
var _ Scraper = ScrapeEngineInnodbStatus{}
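
For readers who want to exercise the parsing logic on its own, here is a minimal, self-contained sketch that runs the same two regular expressions used in Scrape above against a hypothetical status fragment, printing the extracted values instead of emitting Prometheus gauges:

package main

import (
    "fmt"
    "regexp"
    "strconv"
    "strings"
)

func main() {
    // Hypothetical excerpt from the STATUS column of SHOW ENGINE INNODB STATUS.
    status := "3 queries inside InnoDB, 1 queries in queue\n2 read views open inside InnoDB"

    // Same patterns as in the collector (which uses regexp.Compile and discards
    // the error); MustCompile behaves identically for these constant patterns.
    rQueries := regexp.MustCompile(`(\d+) queries inside InnoDB, (\d+) queries in queue`)
    rViews := regexp.MustCompile(`(\d+) read views open inside InnoDB`)

    for _, line := range strings.Split(status, "\n") {
        if data := rQueries.FindStringSubmatch(line); data != nil {
            inside, _ := strconv.ParseFloat(data[1], 64)
            queued, _ := strconv.ParseFloat(data[2], 64)
            fmt.Println("queries_inside_innodb:", inside)
            fmt.Println("queries_in_queue:", queued)
        } else if data := rViews.FindStringSubmatch(line); data != nil {
            views, _ := strconv.ParseFloat(data[1], 64)
            fmt.Println("read_views_open_inside_innodb:", views)
        }
    }
}

As in the collector, ParseFloat errors are ignored because the captured groups can only be runs of digits.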