
Start building out the packages.

Author: Will Rouesnel
Date: 2020-03-01 01:20:42 +11:00
parent 6fcfe4041a
commit 3ccdfc0777
20 changed files with 673 additions and 608 deletions


@@ -0,0 +1,11 @@
package servers
import (
"github.com/prometheus/client_golang/prometheus"
"time"
)
type cachedMetrics struct {
metrics []prometheus.Metric
lastScrape time.Time
}
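For orientation, a minimal sketch of how such an entry would be judged stale before re-running a namespace query (the uint64 TTL parameter stands in for the mapping's cacheSeconds field consulted in queryNamespaceMappings later in this commit; the helper itself is not part of the change):

// isStale reports whether the cached metrics are older than the given TTL.
// Sketch only: mirrors the scrapeStart.Sub(lastScrape) comparison used in
// queryNamespaceMappings; the ttlSeconds type is an assumption.
func (c cachedMetrics) isStale(now time.Time, ttlSeconds uint64) bool {
	return now.Sub(c.lastScrape).Seconds() > float64(ttlSeconds)
}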


@@ -51,4 +51,4 @@ func loggableDSN(dsn string) string {
}
return pDSN.String()
}
}

pkg/servers/pg_setting.go Normal file

@@ -0,0 +1,113 @@
package servers
import (
"fmt"
"math"
"strconv"
"strings"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"

"github.com/wrouesnel/postgres_exporter/pkg/queries/metricmaps"
)
// pgSetting represents a PostgreSQL runtime variable as returned by the
// pg_settings view.
type pgSetting struct {
name, setting, unit, shortDesc, vartype string
}
func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric {
var (
err error
name = strings.Replace(s.name, ".", "_", -1)
unit = s.unit // nolint: ineffassign
shortDesc = s.shortDesc
subsystem = "settings"
val float64
)
switch s.vartype {
case "bool":
if s.setting == "on" {
val = 1
}
case "integer", "real":
if val, unit, err = s.normaliseUnit(); err != nil {
// Panic, since we should recognise all units
// and don't want to silently exclude metrics
panic(err)
}
if len(unit) > 0 {
name = fmt.Sprintf("%s_%s", name, unit)
shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit)
}
default:
// Panic because we got a type we didn't ask for
panic(fmt.Sprintf("Unsupported vartype %q", s.vartype))
}
desc := newDesc(subsystem, name, shortDesc, labels)
return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
}
// TODO: fix linter override
// nolint: nakedret
func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) {
val, err = strconv.ParseFloat(s.setting, 64)
if err != nil {
return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err)
}
// Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html
switch s.unit {
case "":
return
case "ms", "s", "min", "h", "d":
unit = "seconds"
case "B", "kB", "MB", "GB", "TB", "8kB", "16kB", "32kB", "16MB", "32MB", "64MB":
unit = "bytes"
default:
err = fmt.Errorf("Unknown unit for runtime variable: %q", s.unit)
return
}
// -1 is special, don't modify the value
if val == -1 {
return
}
switch s.unit {
case "ms":
val /= 1000
case "min":
val *= 60
case "h":
val *= 60 * 60
case "d":
val *= 60 * 60 * 24
case "kB":
val *= math.Pow(2, 10)
case "MB":
val *= math.Pow(2, 20)
case "GB":
val *= math.Pow(2, 30)
case "TB":
val *= math.Pow(2, 40)
case "8kB":
val *= math.Pow(2, 13)
case "16kB":
val *= math.Pow(2, 14)
case "32kB":
val *= math.Pow(2, 15)
case "16MB":
val *= math.Pow(2, 24)
case "32MB":
val *= math.Pow(2, 25)
case "64MB":
val *= math.Pow(2, 26)
}
return
}
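As a worked example (values mirror the test fixtures that follow), a setting reported in 8kB pages normalises to bytes, and metric() above folds that unit into the metric name and help text:

// Example only: a pg_settings row whose unit is 8kB pages.
s := pgSetting{name: "eight_kb_fixture_metric", setting: "17", unit: "8kB", vartype: "integer"}
val, unit, err := s.normaliseUnit()
// val == 139264 (17 * 8192 bytes), unit == "bytes", err == nil;
// metric() would emit pg_settings_eight_kb_fixture_metric_bytes as a gauge.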


@@ -0,0 +1,256 @@
// +build !integration
package servers
import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
. "gopkg.in/check.v1"
)
type PgSettingSuite struct{}
var _ = Suite(&PgSettingSuite{})
var fixtures = []fixture{
{
p: pgSetting{
name: "seconds_fixture_metric",
setting: "5",
unit: "s",
shortDesc: "Foo foo foo",
vartype: "integer",
},
n: normalised{
val: 5,
unit: "seconds",
err: "",
},
d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: 5,
},
{
p: pgSetting{
name: "milliseconds_fixture_metric",
setting: "5000",
unit: "ms",
shortDesc: "Foo foo foo",
vartype: "integer",
},
n: normalised{
val: 5,
unit: "seconds",
err: "",
},
d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: 5,
},
{
p: pgSetting{
name: "eight_kb_fixture_metric",
setting: "17",
unit: "8kB",
shortDesc: "Foo foo foo",
vartype: "integer",
},
n: normalised{
val: 139264,
unit: "bytes",
err: "",
},
d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 139264,
},
{
p: pgSetting{
name: "16_kb_real_fixture_metric",
setting: "3.0",
unit: "16kB",
shortDesc: "Foo foo foo",
vartype: "real",
},
n: normalised{
val: 49152,
unit: "bytes",
err: "",
},
d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 49152,
},
{
p: pgSetting{
name: "16_mb_real_fixture_metric",
setting: "3.0",
unit: "16MB",
shortDesc: "Foo foo foo",
vartype: "real",
},
n: normalised{
val: 5.0331648e+07,
unit: "bytes",
err: "",
},
d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 5.0331648e+07,
},
{
p: pgSetting{
name: "32_mb_real_fixture_metric",
setting: "3.0",
unit: "32MB",
shortDesc: "Foo foo foo",
vartype: "real",
},
n: normalised{
val: 1.00663296e+08,
unit: "bytes",
err: "",
},
d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 1.00663296e+08,
},
{
p: pgSetting{
name: "64_mb_real_fixture_metric",
setting: "3.0",
unit: "64MB",
shortDesc: "Foo foo foo",
vartype: "real",
},
n: normalised{
val: 2.01326592e+08,
unit: "bytes",
err: "",
},
d: `Desc{fqName: "pg_settings_64_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 2.01326592e+08,
},
{
p: pgSetting{
name: "bool_on_fixture_metric",
setting: "on",
unit: "",
shortDesc: "Foo foo foo",
vartype: "bool",
},
n: normalised{
val: 1,
unit: "",
err: "",
},
d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
v: 1,
},
{
p: pgSetting{
name: "bool_off_fixture_metric",
setting: "off",
unit: "",
shortDesc: "Foo foo foo",
vartype: "bool",
},
n: normalised{
val: 0,
unit: "",
err: "",
},
d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
v: 0,
},
{
p: pgSetting{
name: "special_minus_one_value",
setting: "-1",
unit: "d",
shortDesc: "foo foo foo",
vartype: "integer",
},
n: normalised{
val: -1,
unit: "seconds",
err: "",
},
d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: -1,
},
{
p: pgSetting{
name: "rds.rds_superuser_reserved_connections",
setting: "2",
unit: "",
shortDesc: "Sets the number of connection slots reserved for rds_superusers.",
vartype: "integer",
},
n: normalised{
val: 2,
unit: "",
err: "",
},
d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Sets the number of connection slots reserved for rds_superusers.", constLabels: {}, variableLabels: []}`,
v: 2,
},
{
p: pgSetting{
name: "unknown_unit",
setting: "10",
unit: "nonexistent",
shortDesc: "foo foo foo",
vartype: "integer",
},
n: normalised{
val: 10,
unit: "",
err: `Unknown unit for runtime variable: "nonexistent"`,
},
},
}
func (s *PgSettingSuite) TestNormaliseUnit(c *C) {
for _, f := range fixtures {
switch f.p.vartype {
case "integer", "real":
val, unit, err := f.p.normaliseUnit()
c.Check(val, Equals, f.n.val)
c.Check(unit, Equals, f.n.unit)
if err == nil {
c.Check("", Equals, f.n.err)
} else {
c.Check(err.Error(), Equals, f.n.err)
}
}
}
}
func (s *PgSettingSuite) TestMetric(c *C) {
defer func() {
if r := recover(); r != nil {
if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` {
panic(r)
}
}
}()
for _, f := range fixtures {
d := &dto.Metric{}
m := f.p.metric(prometheus.Labels{})
m.Write(d) // nolint: errcheck
c.Check(m.Desc().String(), Equals, f.d)
c.Check(d.GetGauge().GetValue(), Equals, f.v)
}
}
type normalised struct {
val float64
unit string
err string
}
type fixture struct {
p pgSetting
n normalised
d string
v float64
}


@@ -4,14 +4,17 @@ import (
"database/sql"
"fmt"
"github.com/blang/semver"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/wrouesnel/postgres_exporter/pkg/pgdbconv"
"github.com/wrouesnel/postgres_exporter/pkg/queries/metricmaps"
"sync"
"time"
)
// Server describes a connection to a PostgreSQL database, along with the
// metric maps and query overrides currently in effect for it.
type Server struct {
db *sql.DB
labels prometheus.Labels
@@ -21,7 +24,7 @@ type Server struct {
// then maps are recalculated.
lastMapVersion semver.Version
// Currently active metric map
metricMap map[string]MetricMapNamespace
metricMap map[string]metricmaps.MetricMapNamespace
// Currently active query overrides
queryOverrides map[string]string
mappingMtx sync.RWMutex
@@ -50,7 +53,7 @@ func NewServer(dsn string, opts ...ServerOpt) (*Server, error) {
db: db,
master: false,
labels: prometheus.Labels{
serverLabelName: fingerprint,
metricmaps.ServerLabelName: fingerprint,
},
metricCache: make(map[string]cachedMetrics),
}
@@ -80,7 +83,7 @@ func (s *Server) Ping() error {
// String returns server's fingerprint.
func (s *Server) String() string {
return s.labels[serverLabelName]
return s.labels[metricmaps.ServerLabelName]
}
// Scrape loads metrics.
@@ -104,9 +107,211 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool
return err
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func (s *Server) queryNamespaceMapping(namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) {
// Check for a query override for this namespace
query, found := s.queryOverrides[namespace]
// Was this query disabled (i.e. nothing sensible can be queried on this
// version of PostgreSQL)?
if query == "" && found {
// Return success (no pertinent data)
return []prometheus.Metric{}, []error{}, nil
}
// Don't fail on a bad scrape of one metric
var rows *sql.Rows
var err error
if !found {
// I've no idea how to avoid this properly at the moment, but this is
// an admin tool so you're not injecting SQL right?
rows, err = s.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql
} else {
rows, err = s.db.Query(query) // nolint: safesql
}
if err != nil {
return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", s, namespace, err)
}
defer rows.Close() // nolint: errcheck
var columnNames []string
columnNames, err = rows.Columns()
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err))
}
// Make a lookup map for the column indices
var columnIdx = make(map[string]int, len(columnNames))
for i, n := range columnNames {
columnIdx[n] = i
}
var columnData = make([]interface{}, len(columnNames))
var scanArgs = make([]interface{}, len(columnNames))
for i := range columnData {
scanArgs[i] = &columnData[i]
}
nonfatalErrors := []error{}
metrics := make([]prometheus.Metric, 0)
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
// Get the label values for this row.
labels := make([]string, len(mapping.labels))
for idx, label := range mapping.labels {
labels[idx], _ = pgdbconv.DBToString(columnData[columnIdx[label]])
}
// Loop over column names, and match to scan data. Unknown columns
// will be filled with an untyped metric number *if* they can be
// converted to float64s. NULLs are allowed and treated as NaN.
for idx, columnName := range columnNames {
var metric prometheus.Metric
if metricMapping, ok := mapping.columnMappings[columnName]; ok {
// Is this a metricy metric?
if metricMapping.discard {
continue
}
value, ok := pgdbconv.DBToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
// Generate the metric
metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
} else {
// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
metricLabel := fmt.Sprintf("%s_%s", namespace, columnName)
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, s.labels)
// It's not an error to fail here, since the values are
// unexpected anyway.
value, ok := pgdbconv.DBToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err)))
continue
}
metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...)
}
metrics = append(metrics, metric)
}
}
return metrics, nonfatalErrors, nil
}
// Iterate through all the namespace mappings in the exporter and run their
// queries.
func (s *Server) queryNamespaceMappings(ch chan<- prometheus.Metric) map[string]error {
// Return a map of namespace -> errors
namespaceErrors := make(map[string]error)
scrapeStart := time.Now()
for namespace, mapping := range s.metricMap {
log.Debugln("Querying namespace: ", namespace)
if mapping.master && !s.master {
log.Debugln("Query skipped...")
continue
}
scrapeMetric := false
// Check if the metric is cached
s.cacheMtx.Lock()
cachedMetric, found := s.metricCache[namespace]
s.cacheMtx.Unlock()
// If found, check whether the cached metrics need refreshing
if found {
if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) {
scrapeMetric = true
}
} else {
scrapeMetric = true
}
var metrics []prometheus.Metric
var nonFatalErrors []error
var err error
if scrapeMetric {
metrics, nonFatalErrors, err = s.queryNamespaceMapping(namespace, mapping)
} else {
metrics = cachedMetric.metrics
}
// Serious error - a namespace disappeared
if err != nil {
namespaceErrors[namespace] = err
log.Infoln(err)
}
// Non-serious errors - likely version or parsing problems.
if len(nonFatalErrors) > 0 {
for _, err := range nonFatalErrors {
log.Infoln(err.Error())
}
}
// Emit the metrics into the channel
for _, metric := range metrics {
ch <- metric
}
if scrapeMetric {
// Only cache if metric is meaningfully cacheable
if mapping.cacheSeconds > 0 {
s.cacheMtx.Lock()
s.metricCache[namespace] = cachedMetrics{
metrics: metrics,
lastScrape: scrapeStart,
}
s.cacheMtx.Unlock()
}
}
}
return namespaceErrors
}
// AddQueries adds queries to the builtinMetricMaps and queryOverrides maps.
// Added queries do not respect version requirements, because it is assumed
// that the user knows what they are doing with their version of postgres.
func (s *Server) AddQueries() {
}
// Query the pg_settings view containing runtime variables
func (s *Server) querySettings(ch chan<- prometheus.Metric) error {
log.Debugf("Querying pg_settings view on %q", s)
// pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html
//
// NOTE: If you add more vartypes here, you must update the supported
// types in normaliseUnit() in pg_setting.go
query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');"
rows, err := s.db.Query(query)
if err != nil {
return fmt.Errorf("Error running query on database %q: %s %v", s, metricmaps.ExporterNamespaceLabel, err)
}
defer rows.Close() // nolint: errcheck
for rows.Next() {
setting := &pgSetting{}
err = rows.Scan(&setting.name, &setting.setting, &setting.unit, &setting.shortDesc, &setting.vartype)
if err != nil {
return fmt.Errorf("Error retrieving rows on %q: %s %v", s, metricmaps.ExporterNamespaceLabel, err)
}
ch <- setting.metric(s.labels)
}
return nil
}
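The Scrape method itself is elided from this hunk (only its signature appears in the header above); a plausible sketch, under the assumption that it simply chains the two query paths, might look like the following — the real method's error handling and locking may differ:

// scrapeSketch is illustrative only, not the method body from this commit.
func (s *Server) scrapeSketch(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error {
	var err error
	if !disableSettingsMetrics {
		if err = s.querySettings(ch); err != nil {
			err = fmt.Errorf("error retrieving settings: %s", err)
		}
	}
	if errMap := s.queryNamespaceMappings(ch); len(errMap) > 0 {
		err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
	}
	return err
}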


@@ -1,5 +1,7 @@
package servers
import "github.com/prometheus/client_golang/prometheus"
type ServerOpt func(*Server)
// ServerWithLabels configures a set of labels.
@@ -9,4 +11,4 @@ func ServerWithLabels(labels prometheus.Labels) ServerOpt {
s.labels[k] = v
}
}
}
}