1
0
mirror of https://github.com/prometheus/mysqld_exporter.git synced 2025-07-09 18:21:43 +03:00
Files
mysqld_exporter/collector/info_schema_innodb_cmp.go
TJ Hoplock be5dc65671 chore!: adopt log/slog, drop go-kit/log (#875)
* chore!: adopt log/slog, drop go-kit/log

Requires: prometheus/common#697

This PR includes:
- linter updates to enable `sloglint` linter
- Go dep updates for prometheus/{client_golang,common,exporter-toolkit}
  libs
- refactorings to adopt log/slog in favor of go-kit/log

The bulk of this PR was automated by the following script which is being
used to aid in converting the various exporters/projects to use slog:

https://gist.github.com/tjhop/49f96fb7ebbe55b12deee0b0312d8434

Builds and passes tests locally with go workspaces and up-to-date main
branch of prometheus/common.

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

* build(deps): bump prometheus/common to v0.60.0

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

---------

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>
2024-10-10 18:04:21 +02:00

111 lines
4.1 KiB
Go

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Scrape `information_schema.INNODB_CMP`.
package collector
import (
"context"
"log/slog"
"github.com/prometheus/client_golang/prometheus"
)
// innodbCmpQuery selects the per-page-size compression statistics exposed by
// information_schema.innodb_cmp. Each row describes one compressed page size;
// the *_ops and *_time columns are cumulative counters since server start.
const innodbCmpQuery = `
SELECT
page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time
FROM information_schema.innodb_cmp
`
// Metric descriptors.
//
// One descriptor per column of information_schema.innodb_cmp; every metric is
// labeled with the page_size of the row it came from. All values are
// cumulative, hence the _total suffix and CounterValue at scrape time.
var (
// Cumulative count of compression attempts per page size.
infoSchemaInnodbCmpCompressOps = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
"Number of times a B-tree page of the size PAGE_SIZE has been compressed.",
[]string{"page_size"}, nil,
)
// Cumulative count of successful compressions per page size.
infoSchemaInnodbCmpCompressOpsOk = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"),
"Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.",
[]string{"page_size"}, nil,
)
// Cumulative seconds spent attempting compression per page size.
infoSchemaInnodbCmpCompressTime = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"),
"Total time in seconds spent in attempts to compress B-tree pages.",
[]string{"page_size"}, nil,
)
// Cumulative count of decompressions per page size.
infoSchemaInnodbCmpUncompressOps = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"),
"Number of times a B-tree page of the size PAGE_SIZE has been uncompressed.",
[]string{"page_size"}, nil,
)
// Cumulative seconds spent decompressing per page size.
infoSchemaInnodbCmpUncompressTime = prometheus.NewDesc(
prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"),
"Total time in seconds spent in uncompressing B-tree pages.",
[]string{"page_size"}, nil,
)
)
// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
// It is a stateless Scraper implementation; the zero value is ready to use.
type ScrapeInnodbCmp struct{}
// Name of the Scraper. Should be unique.
func (ScrapeInnodbCmp) Name() string {
	// Scraper names follow the "<schema>.<table>" convention.
	name := informationSchema + ".innodb_cmp"
	return name
}
// Help describes the role of the Scraper.
func (ScrapeInnodbCmp) Help() string {
	const helpText = "Collect metrics from information_schema.innodb_cmp"
	return helpText
}
// Version of MySQL from which scraper is available.
func (ScrapeInnodbCmp) Version() float64 {
	// information_schema.innodb_cmp exists from MySQL 5.5 onward.
	const minVersion = 5.5
	return minVersion
}
// Scrape collects data from database connection and sends it over channel as prometheus metric.
//
// It runs innodbCmpQuery under the caller's context (so scrapes honor
// cancellation/timeouts) and emits one counter per column for each page-size
// row. The logger is required by the Scraper interface but is unused here.
func (ScrapeInnodbCmp) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
	db := instance.getDB()
	informationSchemaInnodbCmpRows, err := db.QueryContext(ctx, innodbCmpQuery)
	if err != nil {
		return err
	}
	defer informationSchemaInnodbCmpRows.Close()

	var (
		pageSize                                                             string
		compressOps, compressOpsOk, compressTime, uncompressOps, uncompressTime float64
	)

	for informationSchemaInnodbCmpRows.Next() {
		if err := informationSchemaInnodbCmpRows.Scan(
			&pageSize, &compressOps, &compressOpsOk, &compressTime, &uncompressOps, &uncompressTime,
		); err != nil {
			return err
		}
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOps, prometheus.CounterValue, compressOps, pageSize)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressOpsOk, prometheus.CounterValue, compressOpsOk, pageSize)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpCompressTime, prometheus.CounterValue, compressTime, pageSize)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressOps, prometheus.CounterValue, uncompressOps, pageSize)
		ch <- prometheus.MustNewConstMetric(infoSchemaInnodbCmpUncompressTime, prometheus.CounterValue, uncompressTime, pageSize)
	}
	// Next() can stop early on an iteration error (e.g. a dropped connection),
	// which is only reported via Err(); surface it instead of dropping it.
	return informationSchemaInnodbCmpRows.Err()
}
// Compile-time assertion that ScrapeInnodbCmp satisfies the Scraper interface.
var _ Scraper = ScrapeInnodbCmp{}