Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2025-04-18 19:24:05 +03:00)
memory: fix panics if metrics do not exist (#1960)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
This commit is contained in:
parent 88c929ac6f
commit ecc805f0fa
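The changes below follow two themes. First, main now owns a signal-aware context that is passed through run() and into collector initialization, and logging switches to logger.LogAttrs(ctx, ...) so that context reaches the slog handler. Second, every collector's Build is reordered so the prometheus descriptors are assigned before the fallible perf-counter or WMI setup. The caller deliberately keeps running when a counter set is absent (see the collection.go hunk near the end, which downgrades "no object" errors to warnings), so a Build that bailed out early used to leave nil descriptors behind for Collect and Close to trip over. A minimal sketch of that failure mode and the fix, with illustrative names rather than the exporter's real API:

package main

import (
    "errors"
    "fmt"
)

type collector struct {
    metric *string // stands in for a *prometheus.Desc field
}

// buildOld mirrors the pre-commit ordering: fallible counter setup first.
// On the early return, c.metric is never assigned, and a later Collect
// that dereferences it panics.
func (c *collector) buildOld(haveCounters bool) error {
    if !haveCounters {
        return errors.New("counter set does not exist")
    }

    m := "my_metric"
    c.metric = &m

    return nil
}

// buildNew mirrors the post-commit ordering: descriptors first, so the
// collector stays safe to use even when counter setup fails.
func (c *collector) buildNew(haveCounters bool) error {
    m := "my_metric"
    c.metric = &m

    if !haveCounters {
        return errors.New("counter set does not exist")
    }

    return nil
}

func main() {
    c := &collector{}
    fmt.Println(c.buildNew(false), c.metric != nil) // error reported, descriptor still set
}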
@@ -47,7 +47,11 @@ import (
 )
 
 func main() {
-    exitCode := run()
+    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+
+    exitCode := run(ctx, os.Args[1:])
+
+    stop()
 
     // If we are running as a service, we need to signal the service control manager that we are done.
     if !IsService {
@@ -60,9 +64,8 @@ func main() {
     <-serviceManagerFinishedCh
 }
 
-func run() int {
+func run(ctx context.Context, args []string) int {
     startTime := time.Now()
-    ctx := context.Background()
 
     app := kingpin.New("windows_exporter", "A metrics collector for Windows.")
 
@@ -118,9 +121,9 @@ func run() int {
     // Initialize collectors before loading and parsing CLI arguments
     collectors := collector.NewWithFlags(app)
 
-    if err := config.Parse(app, os.Args[1:]); err != nil {
+    if err := config.Parse(app, args); err != nil {
         //nolint:sloglint // we do not have a logger yet
-        slog.Error("Failed to load configuration",
+        slog.LogAttrs(ctx, slog.LevelError, "Failed to load configuration",
             slog.Any("err", err),
         )
@@ -131,8 +134,7 @@ func run() int {
 
     logger, err := log.New(logConfig)
     if err != nil {
-        //nolint:sloglint // we do not have a logger yet
-        slog.Error("failed to create logger",
+        logger.LogAttrs(ctx, slog.LevelError, "failed to create logger",
             slog.Any("err", err),
         )
@@ -145,8 +147,8 @@ func run() int {
         logger.InfoContext(ctx, "using configuration file: "+*configFile)
     }
 
-    if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
-        logger.Error("failed to set process priority",
+    if err = setPriorityWindows(ctx, logger, os.Getpid(), *processPriority); err != nil {
+        logger.LogAttrs(ctx, slog.LevelError, "failed to set process priority",
             slog.Any("err", err),
         )
@@ -155,7 +157,7 @@ func run() int {
 
     enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
     if err := collectors.Enable(enabledCollectorList); err != nil {
-        logger.Error("couldn't enable collectors",
+        logger.LogAttrs(ctx, slog.LevelError, "couldn't enable collectors",
             slog.Any("err", err),
         )
@@ -163,9 +165,9 @@ func run() int {
     }
 
     // Initialize collectors before loading
-    if err = collectors.Build(logger); err != nil {
+    if err = collectors.Build(ctx, logger); err != nil {
         for _, err := range utils.SplitError(err) {
-            logger.Error("couldn't initialize collector",
+            logger.LogAttrs(ctx, slog.LevelError, "couldn't initialize collector",
                 slog.Any("err", err),
             )
@@ -220,17 +222,14 @@ func run() int {
         close(errCh)
     }()
 
-    ctx, stop := signal.NotifyContext(ctx, os.Interrupt, os.Kill)
-    defer stop()
-
     select {
     case <-ctx.Done():
-        logger.Info("Shutting down windows_exporter via kill signal")
+        logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via kill signal")
     case <-stopCh:
-        logger.Info("Shutting down windows_exporter via service control")
+        logger.LogAttrs(ctx, slog.LevelInfo, "Shutting down windows_exporter via service control")
     case err := <-errCh:
        if err != nil {
-            logger.ErrorContext(ctx, "Failed to start windows_exporter",
+            logger.LogAttrs(ctx, slog.LevelError, "Failed to start windows_exporter",
                 slog.Any("err", err),
             )
@@ -241,9 +240,9 @@ func run() int {
     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
     defer cancel()
 
-    _ = server.Shutdown(ctx)
+    _ = server.Shutdown(ctx) //nolint:contextcheck // create a new context for server shutdown
 
-    logger.InfoContext(ctx, "windows_exporter has shut down")
+    logger.LogAttrs(ctx, slog.LevelInfo, "windows_exporter has shut down") //nolint:contextcheck
 
     return 0
 }
@@ -266,7 +265,7 @@ func logCurrentUser(logger *slog.Logger) {
 }
 
 // setPriorityWindows sets the priority of the current process to the specified value.
-func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
+func setPriorityWindows(ctx context.Context, logger *slog.Logger, pid int, priority string) error {
     // Mapping of priority names to uint32 values required by windows.SetPriorityClass.
     priorityStringToInt := map[string]uint32{
         "realtime": windows.REALTIME_PRIORITY_CLASS,
@@ -284,7 +283,7 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
         return nil
     }
 
-    logger.LogAttrs(context.Background(), slog.LevelDebug, "setting process priority to "+priority)
+    logger.LogAttrs(ctx, slog.LevelDebug, "setting process priority to "+priority)
 
     // https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
     handle, err := windows.OpenProcess(
cmd/windows_exporter/main_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package main
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "net"
+    "net/http"
+    "net/url"
+    "os"
+    "strings"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+    "golang.org/x/sys/windows"
+)
+
+//nolint:tparallel
+func TestRun(t *testing.T) {
+    t.Parallel()
+
+    for _, tc := range []struct {
+        name            string
+        args            []string
+        config          string
+        metricsEndpoint string
+        exitCode        int
+    }{
+        {
+            name:            "default",
+            args:            []string{},
+            metricsEndpoint: "http://127.0.0.1:9182/metrics",
+        },
+        {
+            name:            "web.listen-address",
+            args:            []string{"--web.listen-address=127.0.0.1:8080"},
+            metricsEndpoint: "http://127.0.0.1:8080/metrics",
+        },
+        {
+            name:            "web.listen-address",
+            args:            []string{"--web.listen-address=127.0.0.1:8081", "--web.listen-address=[::1]:8081"},
+            metricsEndpoint: "http://[::1]:8081/metrics",
+        },
+        {
+            name:            "config",
+            args:            []string{"--config.file=config.yaml"},
+            config:          `{"web":{"listen-address":"127.0.0.1:8082"}}`,
+            metricsEndpoint: "http://127.0.0.1:8082/metrics",
+        },
+        {
+            name:            "web.listen-address with config",
+            args:            []string{"--config.file=config.yaml", "--web.listen-address=127.0.0.1:8084"},
+            config:          `{"web":{"listen-address":"127.0.0.1:8083"}}`,
+            metricsEndpoint: "http://127.0.0.1:8084/metrics",
+        },
+    } {
+        t.Run(tc.name, func(t *testing.T) {
+            ctx, cancel := context.WithCancel(t.Context())
+            defer cancel()
+
+            if tc.config != "" {
+                // Create a temporary config file.
+                tmpfile, err := os.CreateTemp(t.TempDir(), "config-*.yaml")
+                require.NoError(t, err)
+
+                t.Cleanup(func() {
+                    require.NoError(t, tmpfile.Close())
+                })
+
+                _, err = tmpfile.WriteString(tc.config)
+                require.NoError(t, err)
+
+                for i, arg := range tc.args {
+                    tc.args[i] = strings.ReplaceAll(arg, "config.yaml", tmpfile.Name())
+                }
+            }
+
+            exitCodeCh := make(chan int)
+
+            var stdout string
+
+            go func() {
+                stdout = captureOutput(t, func() {
+                    // Simulate the service control manager signaling that we are done.
+                    exitCodeCh <- run(ctx, tc.args)
+                })
+            }()
+
+            t.Cleanup(func() {
+                select {
+                case exitCode := <-exitCodeCh:
+                    require.Equal(t, tc.exitCode, exitCode)
+                case <-time.After(2 * time.Second):
+                    t.Fatalf("timed out waiting for exit code, want %d", tc.exitCode)
+                }
+            })
+
+            if tc.exitCode != 0 {
+                return
+            }
+
+            uri, err := url.Parse(tc.metricsEndpoint)
+            require.NoError(t, err)
+
+            err = waitUntilListening(t, "tcp", uri.Host)
+            require.NoError(t, err, "LOGS:\n%s", stdout)
+
+            req, err := http.NewRequestWithContext(ctx, http.MethodGet, tc.metricsEndpoint, nil)
+            require.NoError(t, err)
+
+            resp, err := http.DefaultClient.Do(req)
+            require.NoError(t, err, "LOGS:\n%s", stdout)
+            require.Equal(t, http.StatusOK, resp.StatusCode)
+
+            body, err := io.ReadAll(resp.Body)
+            require.NoError(t, err)
+
+            err = resp.Body.Close()
+            require.NoError(t, err)
+
+            require.NotEmpty(t, body)
+            require.Contains(t, string(body), "# HELP windows_exporter_build_info")
+
+            cancel()
+        })
+    }
+}
+
+func captureOutput(tb testing.TB, f func()) string {
+    tb.Helper()
+
+    orig := os.Stdout
+    r, w, _ := os.Pipe()
+    os.Stdout = w
+
+    f()
+
+    os.Stdout = orig
+
+    _ = w.Close()
+
+    out, _ := io.ReadAll(r)
+
+    return string(out)
+}
+
+func waitUntilListening(tb testing.TB, network, address string) error {
+    tb.Helper()
+
+    var (
+        conn net.Conn
+        err  error
+    )
+
+    for range 10 {
+        conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
+        if err == nil {
+            _ = conn.Close()
+
+            return nil
+        }
+
+        if errors.Is(err, windows.Errno(10061)) {
+            time.Sleep(50 * time.Millisecond)
+
+            continue
+        }
+    }
+
+    return fmt.Errorf("listener not listening: %w", err)
+}
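Why errors.Is(err, windows.Errno(10061)) works in waitUntilListening: x/sys/windows defines Errno, which implements error and is preserved through the net package's error wrapping, and 10061 is WSAECONNREFUSED, the Winsock code returned while nothing is accepting on the target port yet. The named constant reads the same as the literal used in the test; a small sketch (the closed port is an assumption for illustration):

package main

import (
    "errors"
    "fmt"
    "net"

    "golang.org/x/sys/windows"
)

func main() {
    _, err := net.Dial("tcp", "127.0.0.1:1") // assumed: nothing listens on port 1

    // errors.Is unwraps *net.OpError and *os.SyscallError down to the Errno.
    fmt.Println(errors.Is(err, windows.WSAECONNREFUSED)) // true when refused
}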
@@ -130,13 +130,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
-    }
-
     c.addressBookOperationsTotal = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
         "",
@@ -511,6 +504,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
+    }
+
     return nil
 }
@@ -82,13 +82,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Certification Authority collector: %w", err)
-    }
-
     c.requestsPerSecond = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
         "Total certificate requests processed",
@@ -168,6 +161,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Certification Authority collector: %w", err)
+    }
+
     return nil
 }
@@ -112,13 +112,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create AD FS collector: %w", err)
-    }
-
     c.adLoginConnectionFailures = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
         "Total number of connection failures to an Active Directory domain controller",
@@ -378,6 +371,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create AD FS collector: %w", err)
+    }
+
     return nil
 }
internal/collector/cache/cache.go (14 lines changed)
@@ -98,13 +98,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Cache collector: %w", err)
-    }
-
     c.asyncCopyReadsTotal = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
         "(AsyncCopyReadsTotal)",
@@ -280,6 +273,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Cache collector: %w", err)
+    }
+
     return nil
 }
@@ -89,15 +89,8 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
     c.mu = sync.Mutex{}
 
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Processor Information collector: %w", err)
-    }
-
     c.logicalProcessors = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "logical_processor"),
         "Total number of logical processors",
@@ -186,6 +179,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
     c.processorRTCValues = map[string]utils.Counter{}
     c.processorMPerfValues = map[string]utils.Counter{}
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Processor Information collector: %w", err)
+    }
+
     return nil
 }
@@ -75,18 +75,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
-    if miSession == nil {
-        return errors.New("miSession is nil")
-    }
-
-    miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
-    if err != nil {
-        return fmt.Errorf("failed to create WMI query: %w", err)
-    }
-
-    c.miQuery = miQuery
-    c.miSession = miSession
-
     c.cpuInfo = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, "", Name),
         "Labelled CPU information as provided by Win32_Processor",
@@ -148,6 +136,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
         nil,
     )
 
+    if miSession == nil {
+        return errors.New("miSession is nil")
+    }
+
+    miQuery, err := mi.NewQuery("SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name, ThreadCount, NumberOfCores, NumberOfEnabledCore, NumberOfLogicalProcessors FROM Win32_Processor")
+    if err != nil {
+        return fmt.Errorf("failed to create WMI query: %w", err)
+    }
+
+    c.miQuery = miQuery
+    c.miSession = miSession
+
     var dst []miProcessor
     if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
         return fmt.Errorf("WMI query failed: %w", err)
@@ -160,29 +160,6 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 
     logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
 
-    var err error
-
-    if slices.Contains(c.config.CollectorsEnabled, "connection") {
-        c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
-        if err != nil {
-            return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
-        }
-    }
-
-    if slices.Contains(c.config.CollectorsEnabled, "folder") {
-        c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
-        if err != nil {
-            return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
-        }
-    }
-
-    if slices.Contains(c.config.CollectorsEnabled, "volume") {
-        c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
-        if err != nil {
-            return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
-        }
-    }
-
     // connection
     c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@@ -473,13 +450,36 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    if slices.Contains(c.config.CollectorsEnabled, "connection") {
+        c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
+        if err != nil {
+            return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
+        }
+    }
+
+    if slices.Contains(c.config.CollectorsEnabled, "folder") {
+        c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
+        if err != nil {
+            return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
+        }
+    }
+
+    if slices.Contains(c.config.CollectorsEnabled, "volume") {
+        c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
+        if err != nil {
+            return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
+        }
+    }
+
     return nil
 }
 
 // Collect implements the Collector interface.
 // Sends metric values for each metric to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 3)
+    errs := make([]error, 0)
 
     if slices.Contains(c.config.CollectorsEnabled, "connection") {
         errs = append(errs, c.collectPDHConnection(ch))
@@ -148,12 +148,79 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
-        c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
-        if err != nil {
-            return fmt.Errorf("failed to create DHCP Server collector: %w", err)
-        }
-    }
-
+    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
+        c.scopeInfo = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
+            "DHCP Scope information",
+            []string{"name", "superscope_name", "superscope_id", "scope"},
+            nil,
+        )
+
+        c.scopeState = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
+            "DHCP Scope state",
+            []string{"scope", "state"},
+            nil,
+        )
+
+        c.scopeAddressesFreeTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
+            "DHCP Scope free addresses",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
+            "DHCP Scope free addresses on partner server",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
+            "DHCP Scope free addresses on this server",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeAddressesInUseTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
+            "DHCP Scope addresses in use",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
+            "DHCP Scope addresses in use on partner server",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
+            "DHCP Scope addresses in use on this server",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopePendingOffersTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
+            "DHCP Scope pending offers",
+            []string{"scope"},
+            nil,
+        )
+
+        c.scopeReservedAddressTotal = prometheus.NewDesc(
+            prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
+            "DHCP Scope reserved addresses",
+            []string{"scope"},
+            nil,
+        )
+    }
+
     if slices.Contains(c.config.CollectorsEnabled, subCollectorServerMetrics) {
         c.packetsReceivedTotal = prometheus.NewDesc(
             prometheus.BuildFQName(types.Namespace, Name, "packets_received_total"),
             "Total number of packets received by the DHCP server (PacketsReceivedTotal)",
@@ -304,78 +371,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
             nil,
             nil,
         )
-    }
-
-    if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
-        c.scopeInfo = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_info"),
-            "DHCP Scope information",
-            []string{"name", "superscope_name", "superscope_id", "scope"},
-            nil,
-        )
-
-        c.scopeState = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_state"),
-            "DHCP Scope state",
-            []string{"scope", "state"},
-            nil,
-        )
-
-        c.scopeAddressesFreeTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free"),
-            "DHCP Scope free addresses",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeAddressesFreeOnPartnerServerTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_partner_server"),
-            "DHCP Scope free addresses on partner server",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeAddressesFreeOnThisServerTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_free_on_this_server"),
-            "DHCP Scope free addresses on this server",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeAddressesInUseTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use"),
-            "DHCP Scope addresses in use",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeAddressesInUseOnPartnerServerTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_partner_server"),
-            "DHCP Scope addresses in use on partner server",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeAddressesInUseOnThisServerTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_addresses_in_use_on_this_server"),
-            "DHCP Scope addresses in use on this server",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopePendingOffersTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_pending_offers"),
-            "DHCP Scope pending offers",
-            []string{"scope"},
-            nil,
-        )
-
-        c.scopeReservedAddressTotal = prometheus.NewDesc(
-            prometheus.BuildFQName(types.Namespace, Name, "scope_reserved_address"),
-            "DHCP Scope reserved addresses",
-            []string{"scope"},
-            nil,
-        )
+
+        c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
+        if err != nil {
+            return fmt.Errorf("failed to create DHCP Server collector: %w", err)
+        }
     }
 
     return nil
@@ -72,18 +72,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
-    if miSession == nil {
-        return errors.New("miSession is nil")
-    }
-
-    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
-    if err != nil {
-        return fmt.Errorf("failed to create WMI query: %w", err)
-    }
-
-    c.miQuery = miQuery
-    c.miSession = miSession
-
     c.diskInfo = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "info"),
         "General drive information",
@@ -120,6 +108,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
         nil,
     )
 
+    if miSession == nil {
+        return errors.New("miSession is nil")
+    }
+
+    miQuery, err := mi.NewQuery("SELECT DeviceID, Model, Caption, Name, Partitions, Size, Status, Availability FROM WIN32_DiskDrive")
+    if err != nil {
+        return fmt.Errorf("failed to create WMI query: %w", err)
+    }
+
+    c.miQuery = miQuery
+    c.miSession = miSession
+
     var dst []diskDrive
     if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
         return fmt.Errorf("WMI query failed: %w", err)
@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create DNS collector: %w", err)
-    }
-
     c.zoneTransferRequestsReceived = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
         "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -231,6 +224,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create DNS collector: %w", err)
+    }
+
     return nil
 }
@@ -167,7 +167,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
         prometheus.Labels{"version": fmt.Sprintf("%d.%d", c.iisVersion.major, c.iisVersion.minor)},
     )
 
-    errs := make([]error, 0, 4)
+    errs := make([]error, 0)
 
     if err := c.buildWebService(); err != nil {
         errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))
@@ -247,7 +247,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
         1,
     )
 
-    errs := make([]error, 0, 4)
+    errs := make([]error, 0)
 
     if err := c.collectWebService(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err))
@@ -150,13 +150,6 @@ func (c *Collector) Close() error {
 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
     c.logger = logger.With(slog.String("collector", Name))
 
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
-    }
-
     c.information = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "info"),
         "A metric with a constant '1' value labeled with logical disk information",
@@ -281,6 +274,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
+    }
+
     return nil
 }
@@ -110,13 +110,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Memory collector: %w", err)
-    }
-
     c.availableBytes = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
         "The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -340,13 +333,20 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Memory collector: %w", err)
+    }
+
     return nil
 }
 
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if err := c.collectPDH(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err))
@@ -390,6 +390,8 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
     err := c.perfDataCollector.Collect(&c.perfDataObject)
     if err != nil {
         return fmt.Errorf("failed to collect Memory metrics: %w", err)
+    } else if len(c.perfDataObject) == 0 {
+        return fmt.Errorf("failed to collect Memory metrics: %w", types.ErrNoDataUnexpected)
     }
 
     ch <- prometheus.MustNewConstMetric(
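The collectPDH hunk above is the fix named in the commit title: pdh collection can succeed yet return zero rows when the "Memory" counter object is missing, and the code below the check indexes into the first row. The guard turns an empty result into a wrapped sentinel error instead of a panic. A minimal sketch of the pattern (the stub error and row type only show the shape; types.ErrNoDataUnexpected and the perf-data types are the exporter's own):

package main

import (
    "errors"
    "fmt"
)

var errNoDataUnexpected = errors.New("no data") // stands in for types.ErrNoDataUnexpected

type row struct{ availableBytes float64 }

func collectPDH(rows []row) error {
    if len(rows) == 0 {
        // Without this check, rows[0] below would panic with an index out of range.
        return fmt.Errorf("failed to collect Memory metrics: %w", errNoDataUnexpected)
    }

    fmt.Println("available:", rows[0].availableBytes)

    return nil
}

func main() {
    fmt.Println(collectPDH(nil)) // failed to collect Memory metrics: no data
}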
@@ -122,7 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
 
     c.miSession = miSession
 
-    errs := make([]error, 0, 5)
+    errs := make([]error, 0)
 
     if slices.Contains(c.config.CollectorsEnabled, subCollectorCluster) {
         if err := c.buildCluster(); err != nil {
@@ -227,7 +227,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
     wg.Wait()
     close(errCh)
 
-    errs := make([]error, 0, 5)
+    errs := make([]error, 0)
 
     for err := range errCh {
         errs = append(errs, err)
@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
-    }
-
     c.bytesInJournalQueue = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
         "Size of queue journal in bytes",
@@ -106,6 +99,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
+    }
+
     return nil
 }
@@ -164,6 +164,14 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
+    for _, collector := range c.config.CollectorsEnabled {
+        if !slices.Contains([]string{subCollectorMetrics, subCollectorNicInfo}, collector) {
+            return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
+                strings.Join([]string{subCollectorMetrics, subCollectorNicInfo}, ", "),
+            )
+        }
+    }
+
     c.bytesReceivedTotal = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "bytes_received_total"),
         "(Network.BytesReceivedPerSec)",
@@ -286,7 +294,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
         if err := c.collect(ch); err != nil {
@@ -94,20 +94,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    errs := make([]error, 0, 2)
-
-    c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
-    if err != nil {
-        errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
-    }
-
-    c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
-    if err != nil {
-        errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
-    }
-
     c.accessAccepts = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "access_accepts"),
         "(AccessAccepts)",
@@ -260,13 +246,27 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    errs := make([]error, 0)
+
+    c.accessPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccess](pdh.CounterTypeRaw, "NPS Authentication Server", nil)
+    if err != nil {
+        errs = append(errs, fmt.Errorf("failed to create NPS Authentication Server collector: %w", err))
+    }
+
+    c.accountingPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesAccounting](pdh.CounterTypeRaw, "NPS Accounting Server", nil)
+    if err != nil {
+        errs = append(errs, fmt.Errorf("failed to create NPS Accounting Server collector: %w", err))
+    }
+
     return errors.Join(errs...)
 }
 
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if err := c.collectAccept(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))
@@ -209,7 +209,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 4)
+    errs := make([]error, 0)
 
     c.collect(ch)
@@ -74,13 +74,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Paging File collector: %w", err)
-    }
-
     c.pagingLimitBytes = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "limit_bytes"),
         "Number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files",
@@ -95,6 +88,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Paging File", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Paging File collector: %w", err)
+    }
+
     return nil
 }
@@ -127,13 +127,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
-    }
-
     c.requestsQueued = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "requests_queued"),
         "The number of requests queued to the disk (PhysicalDisk.CurrentDiskQueueLength)",
@@ -218,6 +211,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "PhysicalDisk", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create PhysicalDisk collector: %w", err)
+    }
+
     return nil
 }
@@ -126,25 +126,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
-    if miSession == nil {
-        return errors.New("miSession is nil")
-    }
-
-    miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
-    if err != nil {
-        return fmt.Errorf("failed to create WMI query: %w", err)
-    }
-
-    c.miQueryPrinter = miQuery
-
-    miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
-    if err != nil {
-        return fmt.Errorf("failed to create WMI query: %w", err)
-    }
-
-    c.miQueryPrinterJobs = miQuery
-    c.miSession = miSession
-
     c.printerJobStatus = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "job_status"),
         "A counter of printer jobs by status",
@@ -164,6 +145,25 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
         nil,
     )
 
+    if miSession == nil {
+        return errors.New("miSession is nil")
+    }
+
+    miQuery, err := mi.NewQuery("SELECT Name, Default, PrinterStatus, JobCountSinceLastReset FROM win32_Printer")
+    if err != nil {
+        return fmt.Errorf("failed to create WMI query: %w", err)
+    }
+
+    c.miQueryPrinter = miQuery
+
+    miQuery, err = mi.NewQuery("SELECT Name, Status FROM win32_PrintJob")
+    if err != nil {
+        return fmt.Errorf("failed to create WMI query: %w", err)
+    }
+
+    c.miQueryPrinterJobs = miQuery
+    c.miSession = miSession
+
     return nil
 }
@@ -102,18 +102,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(*slog.Logger, *mi.Session) error {
-    var err error
-
-    c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
-    }
-
-    c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
-    }
-
     // net
     c.baseTCPRTT = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -238,13 +226,27 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {
         nil,
     )
 
-    return nil
+    var err error
+
+    errs := make([]error, 0)
+
+    c.perfDataCollectorNetwork, err = pdh.NewCollector[perfDataCounterValuesNetwork](pdh.CounterTypeRaw, "RemoteFX Network", pdh.InstancesAll)
+    if err != nil {
+        errs = append(errs, fmt.Errorf("failed to create RemoteFX Network collector: %w", err))
+    }
+
+    c.perfDataCollectorGraphics, err = pdh.NewCollector[perfDataCounterValuesGraphics](pdh.CounterTypeRaw, "RemoteFX Graphics", pdh.InstancesAll)
+    if err != nil {
+        errs = append(errs, fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err))
+    }
+
+    return errors.Join(errs...)
 }
 
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if err := c.collectRemoteFXNetworkCount(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))
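The RemoteFX Build above also switches from returning at the first counter-creation failure to accumulating with errors.Join, so both counter sets are attempted and the caller sees every failure at once. A small sketch of that accumulation pattern (newCounter is a placeholder for the pdh constructor):

package main

import (
    "errors"
    "fmt"
)

func newCounter(name string, ok bool) (string, error) {
    if !ok {
        return "", fmt.Errorf("failed to create %s collector", name)
    }

    return name, nil
}

func build() error {
    errs := make([]error, 0)

    if _, err := newCounter("RemoteFX Network", false); err != nil {
        errs = append(errs, err)
    }

    if _, err := newCounter("RemoteFX Graphics", false); err != nil {
        errs = append(errs, err)
    }

    // errors.Join returns nil when errs is empty, so success needs no special case.
    return errors.Join(errs...)
}

func main() {
    fmt.Println(build()) // both failures, newline-separated
}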
@@ -76,13 +76,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
-    }
-
     c.currentOpenFileCount = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "server_shares_current_open_file_count"),
         "Current total count open files on the SMB Server Share",
@@ -132,6 +125,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Server Shares", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create SMB Server Shares collector: %w", err)
+    }
+
     return nil
 }
@@ -91,13 +91,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
-    }
-
     // desc creates a new prometheus description
     desc := func(metricName string, description string, labels []string) *prometheus.Desc {
         return prometheus.NewDesc(
@@ -193,6 +186,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         []string{"server", "share"},
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMB Client Shares", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create SMB Client Shares collector: %w", err)
+    }
+
     return nil
 }
@@ -157,13 +157,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create SMTP Server collector: %w", err)
-    }
-
     logger.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.",
         slog.String("collector", Name),
     )
@@ -421,6 +414,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "SMTP Server", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create SMTP Server collector: %w", err)
+    }
+
     return nil
 }
@@ -77,13 +77,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create System collector: %w", err)
-    }
-
     c.bootTime = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "boot_time_timestamp_seconds"),
         "Unix timestamp of system boot time",
@@ -134,6 +127,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "System", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create System collector: %w", err)
+    }
+
     return nil
 }
@@ -118,18 +118,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create TCPv4 collector: %w", err)
-    }
-
-    c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create TCPv6 collector: %w", err)
-    }
-
     c.connectionFailures = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "connection_failures_total"),
         "(TCP.ConnectionFailures)",
@@ -190,13 +178,25 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         []string{"af", "state"}, nil,
     )
 
+    var err error
+
+    c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv4", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create TCPv4 collector: %w", err)
+    }
+
+    c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "TCPv6", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create TCPv6 collector: %w", err)
+    }
+
     return nil
 }
 
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if slices.Contains(c.config.CollectorsEnabled, "metrics") {
         if err := c.collect(ch); err != nil {
@@ -133,25 +133,8 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
-    if miSession == nil {
-        return errors.New("miSession is nil")
-    }
-
     c.logger = logger.With(slog.String("collector", Name))
 
-    c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)
-
-    if c.connectionBrokerEnabled {
-        var err error
-
-        c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
-        if err != nil {
-            return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
-        }
-    } else {
-        logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
-    }
-
     c.sessionInfo = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "session_info"),
         "Terminal Services sessions info",
@@ -243,8 +226,23 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
         nil,
     )
 
+    if miSession == nil {
+        return errors.New("miSession is nil")
+    }
+
+    var err error
+
+    c.connectionBrokerEnabled = isConnectionBrokerServer(miSession)
+
+    if c.connectionBrokerEnabled {
+        c.perfDataCollectorBroker, err = pdh.NewCollector[perfDataCounterValuesBroker](pdh.CounterTypeRaw, "Remote Desktop Connection Broker Counterset", pdh.InstancesAll)
+        if err != nil {
+            return fmt.Errorf("failed to create Remote Desktop Connection Broker Counterset collector: %w", err)
+        }
+    } else {
+        logger.Debug("host is not a connection broker skipping Connection Broker performance metrics.")
+    }
+
     c.hServer, err = wtsapi32.WTSOpenServer("")
     if err != nil {
         return fmt.Errorf("failed to open WTS server: %w", err)
@@ -261,7 +259,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 3)
+    errs := make([]error, 0)
 
     if err := c.collectWTSSessions(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed collecting terminal services session infos: %w", err))
@@ -44,7 +44,7 @@ func TestMultipleDirectories(t *testing.T) {
     })
 
     collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
-    require.NoError(t, collectors.Build(logger))
+    require.NoError(t, collectors.Build(t.Context(), logger))
 
     metrics := make(chan prometheus.Metric)
     got := ""
@@ -81,7 +81,7 @@ func TestDuplicateFileName(t *testing.T) {
     })
 
     collectors := collector.New(map[string]collector.Collector{textfile.Name: textFileCollector})
-    require.NoError(t, collectors.Build(logger))
+    require.NoError(t, collectors.Build(t.Context(), logger))
 
     metrics := make(chan prometheus.Metric)
     got := ""
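t.Context (and the b.Context used in the benchmark helper further down) was added in Go 1.24: it returns a context that is canceled after the test completes and before cleanup functions run, which makes it a natural root context for code under test. A minimal sketch:

package main

import "testing"

func TestWithContext(t *testing.T) {
    ctx := t.Context()

    select {
    case <-ctx.Done():
        t.Fatal("context should still be live during the test body")
    default:
        // still running; cancellation happens only after the test finishes
    }
}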
@@ -70,13 +70,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
-    if err != nil {
-        return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
-    }
-
     c.temperature = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "temperature_celsius"),
         "(Temperature)",
@@ -102,6 +95,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Thermal Zone Information", pdh.InstancesAll)
+    if err != nil {
+        return fmt.Errorf("failed to create Thermal Zone Information collector: %w", err)
+    }
+
     return nil
 }
@@ -200,7 +200,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if slices.Contains(c.config.CollectorsEnabled, collectorSystemTime) {
         if err := c.collectTime(ch); err != nil {
@@ -80,18 +80,6 @@ func (c *Collector) Close() error {
 }
 
 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-    var err error
-
-    c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create UDPv4 collector: %w", err)
-    }
-
-    c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
-    if err != nil {
-        return fmt.Errorf("failed to create UDPv6 collector: %w", err)
-    }
-
     c.datagramsNoPortTotal = prometheus.NewDesc(
         prometheus.BuildFQName(types.Namespace, Name, "datagram_no_port_total"),
         "Number of received UDP datagrams for which there was no application at the destination port",
@@ -117,6 +105,18 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
         nil,
     )
 
+    var err error
+
+    c.perfDataCollector4, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv4", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create UDPv4 collector: %w", err)
+    }
+
+    c.perfDataCollector6, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "UDPv6", nil)
+    if err != nil {
+        return fmt.Errorf("failed to create UDPv6 collector: %w", err)
+    }
+
     return nil
 }
@@ -230,7 +230,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
 func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
-    errs := make([]error, 0, 2)
+    errs := make([]error, 0)
 
     if err := c.collectCpu(ch); err != nil {
         errs = append(errs, fmt.Errorf("failed collecting vmware cpu metrics: %w", err))
@@ -18,4 +18,5 @@ import "errors"
 var (
     ErrCollectorNotInitialized = errors.New("collector not initialized")
     ErrNoData                  = errors.New("no data")
+    ErrNoDataUnexpected        = errors.New("no data")
 )
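Note that ErrNoData and ErrNoDataUnexpected share the same message but are distinct sentinel values, so callers can still tell them apart with errors.Is; the split between "expected" and "unexpected" empty results is inferred here from the names. A minimal sketch of that discrimination:

package main

import (
    "errors"
    "fmt"
)

var (
    ErrNoData           = errors.New("no data")
    ErrNoDataUnexpected = errors.New("no data")
)

func main() {
    err := fmt.Errorf("failed to collect Memory metrics: %w", ErrNoDataUnexpected)

    fmt.Println(errors.Is(err, ErrNoDataUnexpected)) // true
    fmt.Println(errors.Is(err, ErrNoData))           // false: different value, same text
}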
@@ -47,7 +47,7 @@ func FuncBenchmarkCollector[C collector.Collector](b *testing.B, name string, co
     }
 
     collectors := collector.New(map[string]collector.Collector{name: c})
-    require.NoError(b, collectors.Build(logger))
+    require.NoError(b, collectors.Build(b.Context(), logger))
 
     metrics := make(chan prometheus.Metric)
@@ -201,7 +201,7 @@ func (c *Collection) Enable(enabledCollectors []string) error {
 // Build is to be called by the exporter for collector initialization.
 // Instead of failing fast, it will try to build all collectors and return all errors.
 // Errors are joined with errors.Join.
-func (c *Collection) Build(logger *slog.Logger) error {
+func (c *Collection) Build(ctx context.Context, logger *slog.Logger) error {
     c.startTime = gotime.Now()
 
     err := c.initMI()
@@ -236,7 +236,7 @@ func (c *Collection) Build(logger *slog.Logger) error {
             errors.Is(err, pdh.NewPdhError(pdh.CstatusNoObject)) ||
             errors.Is(err, pdh.NewPdhError(pdh.CstatusNoCounter)) ||
             errors.Is(err, mi.MI_RESULT_INVALID_NAMESPACE) {
-            logger.LogAttrs(context.Background(), slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err))
+            logger.LogAttrs(ctx, slog.LevelWarn, "couldn't initialize collector", slog.Any("err", err))
 
             continue
         }
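Collection.Build now threads the caller's context into the warn-level log call instead of minting a fresh context.Background(). The practical effect is that slog handlers which read values from the context (trace IDs, request metadata) see the real run context; a minimal sketch:

package main

import (
    "context"
    "log/slog"
    "os"
)

func build(ctx context.Context, logger *slog.Logger) {
    // The context passed here is handed to the slog.Handler, which may
    // extract values from it when formatting the record.
    logger.LogAttrs(ctx, slog.LevelWarn, "couldn't initialize collector",
        slog.String("collector", "example"),
    )
}

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
    build(context.Background(), logger)
}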
@@ -65,7 +65,10 @@
         },
         {
             "customType": "regex",
-            "fileMatch": ["Makefile"],
+            "fileMatch": [
+                "Makefile",
+                "(^|/).+\\.go"
+            ],
             "matchStrings": [
                 "go run (?<depName>\\S+)@(?<currentValue>\\S+)"
             ],
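This last hunk extends the Renovate custom regex manager so that `go run tool@version` pins are picked up in Go source files as well as the Makefile. A line it would now match inside a .go file looks like the following (the tool path and version are hypothetical):

package tools

// Renovate's matchStrings regex captures the module path as depName and
// the pinned tag as currentValue from directives like this one:
//go:generate go run github.com/example/tool@v1.2.3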