
chore: Remove registry based perfdata collector (#1742)

Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Jan-Otto Kröpke authored 2024-11-17 21:51:12 +01:00, committed by GitHub
parent 6206b695c6
commit e6a15d4ec4
213 changed files with 8079 additions and 12405 deletions

.gitignore vendored
View File

@ -9,3 +9,4 @@ output/
installer/*.msi
installer/*.wixpdb
local/
!.idea/inspectionProfiles/Project_Default.xml

View File

@ -36,7 +36,7 @@ linters-settings:
gci:
sections:
- prefix(github.com/prometheus-community/windows_exporter/internal/initiate)
- prefix(github.com/prometheus-community/windows_exporter/internal/windowsservice)
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
custom-order: true

View File

@ -7,7 +7,8 @@ package main
//goland:noinspection GoUnsortedImport
//nolint:gofumpt
import (
"github.com/prometheus-community/windows_exporter/internal/initiate"
// It's important that we do these first so that we can register with the Windows service control ASAP to avoid timeouts.
"github.com/prometheus-community/windows_exporter/internal/windowsservice"
"context"
"errors"
@ -19,7 +20,7 @@ import (
"os/signal"
"os/user"
"runtime"
"sort"
"slices"
"strings"
"time"
@ -28,9 +29,6 @@ import (
"github.com/prometheus-community/windows_exporter/internal/httphandler"
"github.com/prometheus-community/windows_exporter/internal/log"
"github.com/prometheus-community/windows_exporter/internal/log/flag"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus-community/windows_exporter/pkg/collector"
"github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web"
@ -42,14 +40,14 @@ func main() {
exitCode := run()
// If we are running as a service, we need to signal the service control manager that we are done.
if !initiate.IsService {
if !windowsservice.IsService {
os.Exit(exitCode)
}
initiate.ExitCodeCh <- exitCode
windowsservice.ExitCodeCh <- exitCode
// Wait for the service control manager to signal that we are done.
<-initiate.StopCh
<-windowsservice.StopCh
}
func run() int {
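Note on the hunk above: the initiate package is replaced by internal/windowsservice while keeping the same hand-off pattern. The side-effecting import registers with the Windows service control manager as early as possible, run()'s exit code is reported back over a channel, and shutdown is coordinated through a stop channel. A minimal hypothetical sketch of what such a package could expose, based only on the three identifiers used in main.go (the real implementation is not part of this diff):

// Package windowsservice (hypothetical sketch). Only the three identifiers
// referenced in main.go above are shown; the real implementation also
// contains the service handler that talks to the service control manager.
package windowsservice

// IsService reports whether the exporter was started by the Windows service
// control manager (assumed to be detected during package initialisation,
// which is why the import has to happen first).
var IsService bool

// ExitCodeCh carries the exit code from run() back to the service handler.
var ExitCodeCh = make(chan int, 1)

// StopCh signals shutdown: the service handler uses it to tell main() that
// the service control manager requested a stop, and later that the handler
// itself has finished.
var StopCh = make(chan struct{})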
@ -80,11 +78,7 @@ func run() int {
enabledCollectors = app.Flag(
"collectors.enabled",
"Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
Default(types.DefaultCollectors).String()
printCollectors = app.Flag(
"collectors.print",
"If true, print available collectors and exit.",
).Bool()
Default(collector.DefaultCollectors).String()
timeoutMargin = app.Flag(
"scrape.timeout-margin",
"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
@ -97,11 +91,6 @@ func run() int {
"process.priority",
"Priority of the exporter process. Higher priorities may improve exporter responsiveness during periods of system load. Can be one of [\"realtime\", \"high\", \"abovenormal\", \"normal\", \"belownormal\", \"low\"]",
).Default("normal").String()
togglePDH = app.Flag(
"perfcounter.engine",
"EXPERIMENTAL: Performance counter engine to use. Can be one of \"pdh\", \"registry\". PDH is in experimental state. This flag will be removed in 0.31.",
).Default("registry").String()
)
logConfig := &log.Config{}
@ -179,18 +168,6 @@ func run() int {
logger.Debug("Logging has Started")
if v, ok := os.LookupEnv("WINDOWS_EXPORTER_PERF_COUNTERS_ENGINE"); ok && v == "pdh" || *togglePDH == "pdh" {
logger.Info("Using performance data helper from PHD.dll for performance counter collection. This is in experimental state.")
toggle.PHDEnabled = true
}
if *printCollectors {
printCollectorsToStdout()
return 0
}
if err = setPriorityWindows(logger, os.Getpid(), *processPriority); err != nil {
logger.Error("failed to set process priority",
slog.Any("err", err),
@ -199,9 +176,11 @@ func run() int {
return 1
}
enabledCollectorList := utils.ExpandEnabledCollectors(*enabledCollectors)
enabledCollectorList := expandEnabledCollectors(*enabledCollectors)
if err := collectors.Enable(enabledCollectorList); err != nil {
logger.Error(err.Error())
logger.Error("Couldn't enable collectors",
slog.Any("err", err),
)
return 1
}
@ -215,14 +194,6 @@ func run() int {
return 1
}
if err = collectors.SetPerfCounterQuery(logger); err != nil {
logger.Error("Couldn't set performance counter query",
slog.Any("err", err),
)
return 1
}
logCurrentUser(logger)
logger.Info("Enabled collectors: " + strings.Join(enabledCollectorList, ", "))
@ -268,7 +239,7 @@ func run() int {
errCh <- err
}
errCh <- nil
close(errCh)
}()
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
@ -277,7 +248,7 @@ func run() int {
select {
case <-ctx.Done():
logger.Info("Shutting down windows_exporter via kill signal")
case <-initiate.StopCh:
case <-windowsservice.StopCh:
logger.Info("Shutting down windows_exporter via service control")
case err := <-errCh:
if err != nil {
@ -299,17 +270,6 @@ func run() int {
return 0
}
func printCollectorsToStdout() {
collectorNames := collector.Available()
sort.Strings(collectorNames)
fmt.Println("Available collectors:") //nolint:forbidigo
for _, n := range collectorNames {
fmt.Printf(" - %s\n", n) //nolint:forbidigo
}
}
func logCurrentUser(logger *slog.Logger) {
u, err := user.Current()
if err != nil {
@ -367,3 +327,9 @@ func setPriorityWindows(logger *slog.Logger, pid int, priority string) error {
return nil
}
func expandEnabledCollectors(enabled string) []string {
expanded := strings.ReplaceAll(enabled, "[defaults]", collector.DefaultCollectors)
return slices.Compact(strings.Split(expanded, ","))
}
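For illustration, the new expandEnabledCollectors helper simply substitutes the "[defaults]" placeholder and splits on commas. A small self-contained sketch follows; the default list used here is an assumption, and the real value of collector.DefaultCollectors is a longer list defined in pkg/collector:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// defaultCollectors is an assumed stand-in for collector.DefaultCollectors.
const defaultCollectors = "cpu,logical_disk,net,os,service,system"

// expandEnabledCollectors mirrors the helper added in this commit.
func expandEnabledCollectors(enabled string) []string {
	expanded := strings.ReplaceAll(enabled, "[defaults]", defaultCollectors)

	// slices.Compact removes only consecutive duplicates, so a collector
	// name repeated in non-adjacent positions would still appear twice.
	return slices.Compact(strings.Split(expanded, ","))
}

func main() {
	fmt.Println(expandEnabledCollectors("[defaults],textfile"))
	// With the assumed default list this prints:
	// [cpu logical_disk net os service system textfile]
}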

View File

@ -10,7 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -21,11 +20,10 @@ type Config struct{}
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics.
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
addressBookClientSessions *prometheus.Desc
addressBookOperationsTotal *prometheus.Desc
@ -111,14 +109,8 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
@ -273,7 +265,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DirectoryServices", perfdata.AllInstances, counters)
c.perfDataCollector, err = perfdata.NewCollector("DirectoryServices", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
}
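The constructor change in the hunk above recurs in every collector touched by this commit: with the registry engine removed, perfdata.NewCollector no longer takes an engine selector (perfdata.V2), and the all-instances wildcard is renamed from perfdata.AllInstances to perfdata.InstanceAll. A hedged before/after sketch; it compiles only inside this module, since internal/perfdata is not importable from outside, and the counter list is a tiny illustrative subset:

package ad // sketch file; would sit next to the collector shown above

import "github.com/prometheus-community/windows_exporter/internal/perfdata"

// newPerfDataCollector illustrates the signature change only; the counter
// list is not the real DirectoryServices set.
func newPerfDataCollector() (*perfdata.Collector, error) {
	counters := []string{"LDAP Searches/sec"}

	// Removed in this commit:
	//   perfdata.NewCollector(perfdata.V2, "DirectoryServices", perfdata.AllInstances, counters)
	// Remaining PDH-only signature:
	return perfdata.NewCollector("DirectoryServices", perfdata.InstanceAll, counters)
}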
@ -657,11 +649,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
return c.collect(ch)
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package ad_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/ad"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -205,155 +205,3 @@ const (
_ = "Warning eventlogs since boot"
_ = "Warning events since boot"
)
// Win32_PerfRawData_DirectoryServices_DirectoryServices docs:
// - https://msdn.microsoft.com/en-us/library/ms803980.aspx
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
Name string
ABANRPersec uint32
ABBrowsesPersec uint32
ABClientSessions uint32
ABMatchesPersec uint32
ABPropertyReadsPersec uint32
ABProxyLookupsPersec uint32
ABSearchesPersec uint32
ApproximatehighestDNT uint32
ATQEstimatedQueueDelay uint32
ATQOutstandingQueuedRequests uint32
ATQRequestLatency uint32
ATQThreadsLDAP uint32
ATQThreadsOther uint32
ATQThreadsTotal uint32
BasesearchesPersec uint32
DatabaseaddsPersec uint32
DatabasedeletesPersec uint32
DatabasemodifysPersec uint32
DatabaserecyclesPersec uint32
DigestBindsPersec uint32
DRAHighestUSNCommittedHighpart uint64
DRAHighestUSNCommittedLowpart uint64
DRAHighestUSNIssuedHighpart uint64
DRAHighestUSNIssuedLowpart uint64
DRAInboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
DRAInboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
DRAInboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
DRAInboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
DRAInboundBytesNotCompressedWithinSitePersec uint32
DRAInboundBytesNotCompressedWithinSiteSinceBoot uint32
DRAInboundBytesTotalPersec uint32
DRAInboundBytesTotalSinceBoot uint32
DRAInboundFullSyncObjectsRemaining uint32
DRAInboundLinkValueUpdatesRemaininginPacket uint32
DRAInboundObjectsAppliedPersec uint32
DRAInboundObjectsFilteredPersec uint32
DRAInboundObjectsPersec uint32
DRAInboundObjectUpdatesRemaininginPacket uint32
DRAInboundPropertiesAppliedPersec uint32
DRAInboundPropertiesFilteredPersec uint32
DRAInboundPropertiesTotalPersec uint32
DRAInboundTotalUpdatesRemaininginPacket uint32
DRAInboundValuesDNsonlyPersec uint32
DRAInboundValuesTotalPersec uint32
DRAOutboundBytesCompressedBetweenSitesAfterCompressionPersec uint32
DRAOutboundBytesCompressedBetweenSitesAfterCompressionSinceBoot uint32
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionPersec uint32
DRAOutboundBytesCompressedBetweenSitesBeforeCompressionSinceBoot uint32
DRAOutboundBytesNotCompressedWithinSitePersec uint32
DRAOutboundBytesNotCompressedWithinSiteSinceBoot uint32
DRAOutboundBytesTotalPersec uint32
DRAOutboundBytesTotalSinceBoot uint32
DRAOutboundObjectsFilteredPersec uint32
DRAOutboundObjectsPersec uint32
DRAOutboundPropertiesPersec uint32
DRAOutboundValuesDNsonlyPersec uint32
DRAOutboundValuesTotalPersec uint32
DRAPendingReplicationOperations uint32
DRAPendingReplicationSynchronizations uint32
DRASyncFailuresonSchemaMismatch uint32
DRASyncRequestsMade uint32
DRASyncRequestsSuccessful uint32
DRAThreadsGettingNCChanges uint32
DRAThreadsGettingNCChangesHoldingSemaphore uint32
DSClientBindsPersec uint32
DSClientNameTranslationsPersec uint32
DSDirectoryReadsPersec uint32
DSDirectorySearchesPersec uint32
DSDirectoryWritesPersec uint32
DSMonitorListSize uint32
DSNameCachehitrate uint32
DSNameCachehitrate_Base uint32
DSNotifyQueueSize uint32
DSPercentReadsfromDRA uint32
DSPercentReadsfromKCC uint32
DSPercentReadsfromLSA uint32
DSPercentReadsfromNSPI uint32
DSPercentReadsfromNTDSAPI uint32
DSPercentReadsfromSAM uint32
DSPercentReadsOther uint32
DSPercentSearchesfromDRA uint32
DSPercentSearchesfromKCC uint32
DSPercentSearchesfromLDAP uint32
DSPercentSearchesfromLSA uint32
DSPercentSearchesfromNSPI uint32
DSPercentSearchesfromNTDSAPI uint32
DSPercentSearchesfromSAM uint32
DSPercentSearchesOther uint32
DSPercentWritesfromDRA uint32
DSPercentWritesfromKCC uint32
DSPercentWritesfromLDAP uint32
DSPercentWritesfromLSA uint32
DSPercentWritesfromNSPI uint32
DSPercentWritesfromNTDSAPI uint32
DSPercentWritesfromSAM uint32
DSPercentWritesOther uint32
DSSearchsuboperationsPersec uint32
DSSecurityDescriptorPropagationsEvents uint32
DSSecurityDescriptorPropagatorAverageExclusionTime uint32
DSSecurityDescriptorPropagatorRuntimeQueue uint32
DSSecurityDescriptorsuboperationsPersec uint32
DSServerBindsPersec uint32
DSServerNameTranslationsPersec uint32
DSThreadsinUse uint32
ExternalBindsPersec uint32
FastBindsPersec uint32
LDAPActiveThreads uint32
LDAPBindTime uint32
LDAPClientSessions uint32
LDAPClosedConnectionsPersec uint32
LDAPNewConnectionsPersec uint32
LDAPNewSSLConnectionsPersec uint32
LDAPSearchesPersec uint32
LDAPSuccessfulBindsPersec uint32
LDAPUDPoperationsPersec uint32
LDAPWritesPersec uint32
LinkValuesCleanedPersec uint32
NegotiatedBindsPersec uint32
NTLMBindsPersec uint32
OnelevelsearchesPersec uint32
PhantomsCleanedPersec uint32
PhantomsVisitedPersec uint32
SAMAccountGroupEvaluationLatency uint32
SAMDisplayInformationQueriesPersec uint32
SAMDomainLocalGroupMembershipEvaluationsPersec uint32
SAMEnumerationsPersec uint32
SAMGCEvaluationsPersec uint32
SAMGlobalGroupMembershipEvaluationsPersec uint32
SAMMachineCreationAttemptsPersec uint32
SAMMembershipChangesPersec uint32
SAMNonTransitiveMembershipEvaluationsPersec uint32
SAMPasswordChangesPersec uint32
SAMResourceGroupEvaluationLatency uint32
SAMSuccessfulComputerCreationsPersecIncludesallrequests uint32
SAMSuccessfulUserCreationsPersec uint32
SAMTransitiveMembershipEvaluationsPersec uint32
SAMUniversalGroupMembershipEvaluationsPersec uint32
SAMUserCreationAttemptsPersec uint32
SimpleBindsPersec uint32
SubtreesearchesPersec uint32
TombstonesGarbageCollectedPersec uint32
TombstonesVisitedPersec uint32
Transitiveoperationsmillisecondsrun uint32
TransitiveoperationsPersec uint32
TransitivesuboperationsPersec uint32
}

View File

@ -10,8 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@ -26,7 +24,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
challengeResponseProcessingTime *prometheus.Desc
challengeResponsesPerSecond *prometheus.Desc
@ -63,46 +61,32 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"Certification Authority"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
requestsPerSecond,
requestProcessingTime,
retrievalsPerSecond,
retrievalProcessingTime,
failedRequestsPerSecond,
issuedRequestsPerSecond,
pendingRequestsPerSecond,
requestCryptographicSigningTime,
requestPolicyModuleProcessingTime,
challengeResponsesPerSecond,
challengeResponseProcessingTime,
signedCertificateTimestampListsPerSecond,
signedCertificateTimestampListProcessingTime,
}
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Certification Authority", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", perfdata.InstanceAll, []string{
requestsPerSecond,
requestProcessingTime,
retrievalsPerSecond,
retrievalProcessingTime,
failedRequestsPerSecond,
issuedRequestsPerSecond,
pendingRequestsPerSecond,
requestCryptographicSigningTime,
requestPolicyModuleProcessingTime,
challengeResponsesPerSecond,
challengeResponseProcessingTime,
signedCertificateTimestampListsPerSecond,
signedCertificateTimestampListProcessingTime,
})
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
c.requestsPerSecond = prometheus.NewDesc(
@ -187,128 +171,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return nil
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
if err := c.collectADCSCounters(ctx, logger, ch); err != nil {
logger.Error("failed collecting ADCS metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collectADCSCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
dst := make([]perflibADCS, 0)
if _, ok := ctx.PerfObjects["Certification Authority"]; !ok {
return errors.New("perflib did not contain an entry for Certification Authority")
}
err := v1.UnmarshalObject(ctx.PerfObjects["Certification Authority"], &dst, logger)
if err != nil {
return err
}
if len(dst) == 0 {
return errors.New("perflib query for Certification Authority (ADCS) returned empty result set")
}
for _, d := range dst {
if d.Name == "" {
continue
}
ch <- prometheus.MustNewConstMetric(
c.requestsPerSecond,
prometheus.CounterValue,
d.RequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalsPerSecond,
prometheus.CounterValue,
d.RetrievalsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RetrievalProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failedRequestsPerSecond,
prometheus.CounterValue,
d.FailedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.issuedRequestsPerSecond,
prometheus.CounterValue,
d.IssuedRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pendingRequestsPerSecond,
prometheus.CounterValue,
d.PendingRequestsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestCryptographicSigningTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestCryptographicSigningTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestPolicyModuleProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.RequestPolicyModuleProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponsesPerSecond,
prometheus.CounterValue,
d.ChallengeResponsesPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponseProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.ChallengeResponseProcessingTime),
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
d.SignedCertificateTimestampListsPerSecond,
d.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(d.SignedCertificateTimestampListProcessingTime),
d.Name,
)
}
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package adcs_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/adcs"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package adcs
const (
@ -15,20 +17,3 @@ const (
signedCertificateTimestampListProcessingTime = "Signed Certificate Timestamp List processing time (ms)"
signedCertificateTimestampListsPerSecond = "Signed Certificate Timestamp Lists/sec"
)
type perflibADCS struct {
Name string
RequestsPerSecond float64 `perflib:"Requests/sec"`
RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
}

View File

@ -13,8 +13,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -28,7 +26,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
adLoginConnectionFailures *prometheus.Desc
artifactDBFailures *prometheus.Desc
@ -95,76 +93,62 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"AD FS"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
adLoginConnectionFailures,
certificateAuthentications,
deviceAuthentications,
extranetAccountLockouts,
federatedAuthentications,
passportAuthentications,
passiveRequests,
passwordChangeFailed,
passwordChangeSucceeded,
tokenRequests,
windowsIntegratedAuthentications,
oAuthAuthZRequests,
oAuthClientAuthentications,
oAuthClientAuthenticationFailures,
oAuthClientCredentialRequestFailures,
oAuthClientCredentialRequests,
oAuthClientPrivateKeyJWTAuthenticationFailures,
oAuthClientPrivateKeyJWTAuthentications,
oAuthClientBasicAuthenticationFailures,
oAuthClientBasicAuthentications,
oAuthClientSecretPostAuthenticationFailures,
oAuthClientSecretPostAuthentications,
oAuthClientWindowsAuthenticationFailures,
oAuthClientWindowsAuthentications,
oAuthLogonCertRequestFailures,
oAuthLogonCertTokenRequests,
oAuthPasswordGrantRequestFailures,
oAuthPasswordGrantRequests,
oAuthTokenRequests,
samlPTokenRequests,
ssoAuthenticationFailures,
ssoAuthentications,
wsFedTokenRequests,
wsTrustTokenRequests,
usernamePasswordAuthenticationFailures,
usernamePasswordAuthentications,
externalAuthentications,
externalAuthNFailures,
artifactDBFailures,
avgArtifactDBQueryTime,
configDBFailures,
avgConfigDBQueryTime,
federationMetadataRequests,
}
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "AD FS", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("AD FS", perfdata.InstanceAll, []string{
adLoginConnectionFailures,
certificateAuthentications,
deviceAuthentications,
extranetAccountLockouts,
federatedAuthentications,
passportAuthentications,
passiveRequests,
passwordChangeFailed,
passwordChangeSucceeded,
tokenRequests,
windowsIntegratedAuthentications,
oAuthAuthZRequests,
oAuthClientAuthentications,
oAuthClientAuthenticationFailures,
oAuthClientCredentialRequestFailures,
oAuthClientCredentialRequests,
oAuthClientPrivateKeyJWTAuthenticationFailures,
oAuthClientPrivateKeyJWTAuthentications,
oAuthClientBasicAuthenticationFailures,
oAuthClientBasicAuthentications,
oAuthClientSecretPostAuthenticationFailures,
oAuthClientSecretPostAuthentications,
oAuthClientWindowsAuthenticationFailures,
oAuthClientWindowsAuthentications,
oAuthLogonCertRequestFailures,
oAuthLogonCertTokenRequests,
oAuthPasswordGrantRequestFailures,
oAuthPasswordGrantRequests,
oAuthTokenRequests,
samlPTokenRequests,
ssoAuthenticationFailures,
ssoAuthentications,
wsFedTokenRequests,
wsTrustTokenRequests,
usernamePasswordAuthenticationFailures,
usernamePasswordAuthentications,
externalAuthentications,
externalAuthNFailures,
artifactDBFailures,
avgArtifactDBQueryTime,
configDBFailures,
avgConfigDBQueryTime,
federationMetadataRequests,
})
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
c.adLoginConnectionFailures = prometheus.NewDesc(
@ -429,286 +413,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return nil
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
return c.collect(ctx, logger, ch)
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var adfsData []perflibADFS
err := v1.UnmarshalObject(ctx.PerfObjects["AD FS"], &adfsData, logger)
if err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.adLoginConnectionFailures,
prometheus.CounterValue,
adfsData[0].AdLoginConnectionFailures,
)
ch <- prometheus.MustNewConstMetric(
c.certificateAuthentications,
prometheus.CounterValue,
adfsData[0].CertificateAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.deviceAuthentications,
prometheus.CounterValue,
adfsData[0].DeviceAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.extranetAccountLockouts,
prometheus.CounterValue,
adfsData[0].ExtranetAccountLockouts,
)
ch <- prometheus.MustNewConstMetric(
c.federatedAuthentications,
prometheus.CounterValue,
adfsData[0].FederatedAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.passportAuthentications,
prometheus.CounterValue,
adfsData[0].PassportAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.passiveRequests,
prometheus.CounterValue,
adfsData[0].PassiveRequests,
)
ch <- prometheus.MustNewConstMetric(
c.passwordChangeFailed,
prometheus.CounterValue,
adfsData[0].PasswordChangeFailed,
)
ch <- prometheus.MustNewConstMetric(
c.passwordChangeSucceeded,
prometheus.CounterValue,
adfsData[0].PasswordChangeSucceeded,
)
ch <- prometheus.MustNewConstMetric(
c.tokenRequests,
prometheus.CounterValue,
adfsData[0].TokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.windowsIntegratedAuthentications,
prometheus.CounterValue,
adfsData[0].WindowsIntegratedAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthAuthZRequests,
prometheus.CounterValue,
adfsData[0].OAuthAuthZRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientAuthenticationsFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientCredentialRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientCredentialsRequests,
prometheus.CounterValue,
adfsData[0].OAuthClientCredentialRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientPrivKeyJWTAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientPrivateKeyJwtAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientPrivKeyJWTAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientBasicAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretBasicAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientBasicAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientSecretPostAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientSecretPostAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientSecretPostAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].OAuthClientWindowsAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthClientWindowsIntegratedAuthentications,
prometheus.CounterValue,
adfsData[0].OAuthClientWindowsAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthLogonCertRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthLogonCertificateTokenRequests,
prometheus.CounterValue,
adfsData[0].OAuthLogonCertTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequestFailures,
prometheus.CounterValue,
adfsData[0].OAuthPasswordGrantRequestFailures,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthPasswordGrantRequests,
prometheus.CounterValue,
adfsData[0].OAuthPasswordGrantRequests,
)
ch <- prometheus.MustNewConstMetric(
c.oAuthTokenRequests,
prometheus.CounterValue,
adfsData[0].OAuthTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.samlPTokenRequests,
prometheus.CounterValue,
adfsData[0].SAMLPTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].SSOAuthenticationFailures,
)
ch <- prometheus.MustNewConstMetric(
c.ssoAuthentications,
prometheus.CounterValue,
adfsData[0].SSOAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.wsFedTokenRequests,
prometheus.CounterValue,
adfsData[0].WSFedTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.wsTrustTokenRequests,
prometheus.CounterValue,
adfsData[0].WSTrustTokenRequests,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].UsernamePasswordAuthnFailures,
)
ch <- prometheus.MustNewConstMetric(
c.upAuthentications,
prometheus.CounterValue,
adfsData[0].UsernamePasswordAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthenticationFailures,
prometheus.CounterValue,
adfsData[0].ExternalAuthNFailures,
)
ch <- prometheus.MustNewConstMetric(
c.externalAuthentications,
prometheus.CounterValue,
adfsData[0].ExternalAuthentications,
)
ch <- prometheus.MustNewConstMetric(
c.artifactDBFailures,
prometheus.CounterValue,
adfsData[0].ArtifactDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgArtifactDBQueryTime,
prometheus.CounterValue,
adfsData[0].AvgArtifactDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.configDBFailures,
prometheus.CounterValue,
adfsData[0].ConfigDBFailures,
)
ch <- prometheus.MustNewConstMetric(
c.avgConfigDBQueryTime,
prometheus.CounterValue,
adfsData[0].AvgConfigDBQueryTime*math.Pow(10, -8),
)
ch <- prometheus.MustNewConstMetric(
c.federationMetadataRequests,
prometheus.CounterValue,
adfsData[0].FederationMetadataRequests,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
data, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect ADFS metrics: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package adfs_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/adfs"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package adfs
const (
@ -45,49 +47,3 @@ const (
wsFedTokenRequests = "WS-Fed Token Requests"
wsTrustTokenRequests = "WS-Trust Token Requests"
)
type perflibADFS struct {
AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
DeviceAuthentications float64 `perflib:"Device Authentications"`
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
FederatedAuthentications float64 `perflib:"Federated Authentications"`
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
PassiveRequests float64 `perflib:"Passive Requests"`
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
TokenRequests float64 `perflib:"Token Requests"`
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
SSOAuthentications float64 `perflib:"SSO Authentications"`
WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
ExternalAuthentications float64 `perflib:"External Authentications"`
ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
}

View File

@ -10,9 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -27,7 +24,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
asyncCopyReadsTotal *prometheus.Desc
asyncDataMapsTotal *prometheus.Desc
@ -80,62 +77,48 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"Cache"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
asyncCopyReadsTotal,
asyncDataMapsTotal,
asyncFastReadsTotal,
asyncMDLReadsTotal,
asyncPinReadsTotal,
copyReadHitsTotal,
copyReadsTotal,
dataFlushesTotal,
dataFlushPagesTotal,
dataMapHitsPercent,
dataMapPinsTotal,
dataMapsTotal,
dirtyPages,
dirtyPageThreshold,
fastReadNotPossiblesTotal,
fastReadResourceMissesTotal,
fastReadsTotal,
lazyWriteFlushesTotal,
lazyWritePagesTotal,
mdlReadHitsTotal,
mdlReadsTotal,
pinReadHitsTotal,
pinReadsTotal,
readAheadsTotal,
syncCopyReadsTotal,
syncDataMapsTotal,
syncFastReadsTotal,
syncMDLReadsTotal,
syncPinReadsTotal,
}
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Cache", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("Cache", perfdata.InstanceAll, []string{
asyncCopyReadsTotal,
asyncDataMapsTotal,
asyncFastReadsTotal,
asyncMDLReadsTotal,
asyncPinReadsTotal,
copyReadHitsTotal,
copyReadsTotal,
dataFlushesTotal,
dataFlushPagesTotal,
dataMapHitsPercent,
dataMapPinsTotal,
dataMapsTotal,
dirtyPages,
dirtyPageThreshold,
fastReadNotPossiblesTotal,
fastReadResourceMissesTotal,
fastReadsTotal,
lazyWriteFlushesTotal,
lazyWritePagesTotal,
mdlReadHitsTotal,
mdlReadsTotal,
pinReadHitsTotal,
pinReadsTotal,
readAheadsTotal,
syncCopyReadsTotal,
syncDataMapsTotal,
syncFastReadsTotal,
syncMDLReadsTotal,
syncPinReadsTotal,
})
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
c.asyncCopyReadsTotal = prometheus.NewDesc(
@ -317,218 +300,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
// Collect implements the Collector interface.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ctx, logger, ch); err != nil {
logger.Error("failed collecting cache metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []perflibCache // Single-instance class, array is required but will have single entry.
if err := v1.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil {
return err
}
if len(dst) != 1 {
return errors.New("expected single instance of Cache")
}
ch <- prometheus.MustNewConstMetric(
c.asyncCopyReadsTotal,
prometheus.CounterValue,
dst[0].AsyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncDataMapsTotal,
prometheus.CounterValue,
dst[0].AsyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncFastReadsTotal,
prometheus.CounterValue,
dst[0].AsyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncMDLReadsTotal,
prometheus.CounterValue,
dst[0].AsyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.asyncPinReadsTotal,
prometheus.CounterValue,
dst[0].AsyncPinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.copyReadHitsTotal,
prometheus.GaugeValue,
dst[0].CopyReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.copyReadsTotal,
prometheus.CounterValue,
dst[0].CopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataFlushesTotal,
prometheus.CounterValue,
dst[0].DataFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataFlushPagesTotal,
prometheus.CounterValue,
dst[0].DataFlushPagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapHitsPercent,
prometheus.GaugeValue,
dst[0].DataMapHitsPercent,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapPinsTotal,
prometheus.CounterValue,
dst[0].DataMapPinsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dataMapsTotal,
prometheus.CounterValue,
dst[0].DataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPages,
prometheus.GaugeValue,
dst[0].DirtyPages,
)
ch <- prometheus.MustNewConstMetric(
c.dirtyPageThreshold,
prometheus.GaugeValue,
dst[0].DirtyPageThreshold,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadNotPossiblesTotal,
prometheus.CounterValue,
dst[0].FastReadNotPossiblesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadResourceMissesTotal,
prometheus.CounterValue,
dst[0].FastReadResourceMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.fastReadsTotal,
prometheus.CounterValue,
dst[0].FastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWriteFlushesTotal,
prometheus.CounterValue,
dst[0].LazyWriteFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.lazyWritePagesTotal,
prometheus.CounterValue,
dst[0].LazyWritePagesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadHitsTotal,
prometheus.CounterValue,
dst[0].MDLReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.mdlReadsTotal,
prometheus.CounterValue,
dst[0].MDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadHitsTotal,
prometheus.CounterValue,
dst[0].PinReadHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.pinReadsTotal,
prometheus.CounterValue,
dst[0].PinReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.readAheadsTotal,
prometheus.CounterValue,
dst[0].ReadAheadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncCopyReadsTotal,
prometheus.CounterValue,
dst[0].SyncCopyReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncDataMapsTotal,
prometheus.CounterValue,
dst[0].SyncDataMapsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncFastReadsTotal,
prometheus.CounterValue,
dst[0].SyncFastReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncMDLReadsTotal,
prometheus.CounterValue,
dst[0].SyncMDLReadsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.syncPinReadsTotal,
prometheus.CounterValue,
dst[0].SyncPinReadsTotal,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
data, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Cache metrics: %w", err)
}
cacheData, ok := data[perftypes.EmptyInstance]
cacheData, ok := data[perfdata.EmptyInstance]
if !ok {
return errors.New("perflib query for Cache returned empty result set")

View File

@ -1,10 +1,12 @@
//go:build windows
package cache_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cache"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package cache
// Perflib "Cache":
@ -33,37 +35,3 @@ const (
syncMDLReadsTotal = "Sync MDL Reads/sec"
syncPinReadsTotal = "Sync Pin Reads/sec"
)
// Perflib "Cache":
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
type perflibCache struct {
AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
DataMapsTotal float64 `perflib:"Data Maps/sec"`
DirtyPages float64 `perflib:"Dirty Pages"`
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
}

View File

@ -11,7 +11,7 @@ import (
"github.com/Microsoft/hcsshim"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -26,6 +26,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
logger *slog.Logger
// Presence
containerAvailable *prometheus.Desc
@ -78,15 +80,13 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
c.containerAvailable = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available"),
"Available",
@ -201,28 +201,11 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(logger, ch); err != nil {
logger.Error("failed collecting collector metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
// The Container type is passed so that only container compute systems are returned.
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
logger.Error("Err in Getting containers",
slog.Any("err", err),
)
return err
return fmt.Errorf("error in fetching containers: %w", err)
}
count := len(containers)
@ -243,14 +226,14 @@ func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) er
for _, containerDetails := range containers {
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
if err = c.collectContainer(logger, ch, containerDetails, containerIdWithPrefix); err != nil {
if err = c.collectContainer(ch, containerDetails, containerIdWithPrefix); err != nil {
if hcsshim.IsNotExist(err) {
logger.Debug("err in fetching container statistics",
c.logger.Debug("err in fetching container statistics",
slog.String("container_id", containerDetails.ID),
slog.Any("err", err),
)
} else {
logger.Error("err in fetching container statistics",
c.logger.Error("err in fetching container statistics",
slog.String("container_id", containerDetails.ID),
slog.Any("err", err),
)
@ -264,7 +247,7 @@ func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) er
containerPrefixes[containerDetails.ID] = containerIdWithPrefix
}
if err = c.collectNetworkMetrics(logger, ch, containerPrefixes); err != nil {
if err = c.collectNetworkMetrics(ch, containerPrefixes); err != nil {
return fmt.Errorf("error in fetching container network statistics: %w", err)
}
@ -275,7 +258,7 @@ func (c *Collector) collect(logger *slog.Logger, ch chan<- prometheus.Metric) er
return nil
}
func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.Metric, containerDetails hcsshim.ContainerProperties, containerIdWithPrefix string) error {
func (c *Collector) collectContainer(ch chan<- prometheus.Metric, containerDetails hcsshim.ContainerProperties, containerIdWithPrefix string) error {
container, err := hcsshim.OpenContainer(containerDetails.ID)
if err != nil {
return fmt.Errorf("error in opening container: %w", err)
@ -287,7 +270,7 @@ func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.M
}
if err := container.Close(); err != nil {
logger.Error("error in closing container",
c.logger.Error("error in closing container",
slog.Any("err", err),
)
}
@ -325,19 +308,19 @@ func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.M
ch <- prometheus.MustNewConstMetric(
c.runtimeTotal,
prometheus.CounterValue,
float64(containerStats.Processor.TotalRuntime100ns)*perftypes.TicksToSecondScaleFactor,
float64(containerStats.Processor.TotalRuntime100ns)*perfdata.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeUser,
prometheus.CounterValue,
float64(containerStats.Processor.RuntimeUser100ns)*perftypes.TicksToSecondScaleFactor,
float64(containerStats.Processor.RuntimeUser100ns)*perfdata.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeKernel,
prometheus.CounterValue,
float64(containerStats.Processor.RuntimeKernel100ns)*perftypes.TicksToSecondScaleFactor,
float64(containerStats.Processor.RuntimeKernel100ns)*perfdata.TicksToSecondScaleFactor,
containerIdWithPrefix,
)
ch <- prometheus.MustNewConstMetric(
@ -372,24 +355,20 @@ func (c *Collector) collectContainer(logger *slog.Logger, ch chan<- prometheus.M
// With HNSv2, the network stats must be collected from hcsshim.HNSListEndpointRequest.
// Network statistics from the container.Statistics() are providing data only, if HNSv1 is used.
// Ref: https://github.com/prometheus-community/windows_exporter/pull/1218
func (c *Collector) collectNetworkMetrics(logger *slog.Logger, ch chan<- prometheus.Metric, containerPrefixes map[string]string) error {
func (c *Collector) collectNetworkMetrics(ch chan<- prometheus.Metric, containerPrefixes map[string]string) error {
hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
if err != nil {
logger.Warn("Failed to collect network stats for containers")
return err
return fmt.Errorf("error in fetching HNS endpoints: %w", err)
}
if len(hnsEndpoints) == 0 {
logger.Info("No network stats for containers to collect")
return nil
return errors.New("no network stats for containers to collect")
}
for _, endpoint := range hnsEndpoints {
endpointStats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
if err != nil {
logger.Warn("Failed to collect network stats for interface "+endpoint.Id,
c.logger.Warn("Failed to collect network stats for interface "+endpoint.Id,
slog.Any("err", err),
)
@ -400,7 +379,7 @@ func (c *Collector) collectNetworkMetrics(logger *slog.Logger, ch chan<- prometh
containerIdWithPrefix, ok := containerPrefixes[containerId]
if !ok {
logger.Debug("Failed to collect network stats for container " + containerId)
c.logger.Debug("Failed to collect network stats for container " + containerId)
continue
}
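As the comment above notes, with HNSv2 the per-container network statistics have to be fetched from the HNS endpoints rather than from container.Statistics(). A hedged sketch of that path, using only the two hcsshim calls visible in this diff; how endpoints are mapped back to containers is elided here, as it is in the excerpt:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

// listEndpointStats is an illustrative sketch of the HNSv2 network-stats path.
func listEndpointStats() error {
	hnsEndpoints, err := hcsshim.HNSListEndpointRequest()
	if err != nil {
		return fmt.Errorf("error in fetching HNS endpoints: %w", err)
	}

	for _, endpoint := range hnsEndpoints {
		stats, err := hcsshim.GetHNSEndpointStats(endpoint.Id)
		if err != nil {
			// The collector logs and skips endpoints it cannot query.
			continue
		}
		// stats holds per-endpoint traffic counters that the collector maps
		// back to the owning container before emitting metrics.
		fmt.Printf("endpoint %s: %+v\n", endpoint.Id, stats)
	}

	return nil
}

func main() {
	if err := listEndpointStats(); err != nil {
		log.Fatal(err)
	}
}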

View File

@ -1,10 +1,12 @@
//go:build windows
package container_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/container"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package cpu
// Processor performance counters.
@ -26,32 +28,3 @@ const (
processorUtilityRate = "% Processor Utility"
userTimeSeconds = "% User Time"
)
type perflibProcessorInformation struct {
Name string
C1TimeSeconds float64 `perflib:"% C1 Time"`
C2TimeSeconds float64 `perflib:"% C2 Time"`
C3TimeSeconds float64 `perflib:"% C3 Time"`
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
DPCTimeSeconds float64 `perflib:"% DPC Time"`
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
IdleTimeSeconds float64 `perflib:"% Idle Time"`
InterruptsTotal float64 `perflib:"Interrupts/sec"`
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
ParkingStatus float64 `perflib:"Parking Status"`
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
ProcessorPerformance float64 `perflib:"% Processor Performance"`
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
UserTimeSeconds float64 `perflib:"% User Time"`
}

View File

@ -5,13 +5,10 @@ package cpu
import (
"fmt"
"log/slog"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
@ -26,7 +23,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
processorRTCValues map[string]utils.Counter
processorMPerfValues map[string]utils.Counter
@ -67,56 +64,42 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"Processor Information"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
c1TimeSeconds,
c2TimeSeconds,
c3TimeSeconds,
c1TransitionsTotal,
c2TransitionsTotal,
c3TransitionsTotal,
clockInterruptsTotal,
dpcQueuedPerSecond,
dpcTimeSeconds,
idleBreakEventsTotal,
idleTimeSeconds,
interruptsTotal,
interruptTimeSeconds,
parkingStatus,
performanceLimitPercent,
priorityTimeSeconds,
privilegedTimeSeconds,
privilegedUtilitySeconds,
processorFrequencyMHz,
processorPerformance,
processorTimeSeconds,
processorUtilityRate,
userTimeSeconds,
}
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Processor Information", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("Processor Information", perfdata.InstanceAll, []string{
c1TimeSeconds,
c2TimeSeconds,
c3TimeSeconds,
c1TransitionsTotal,
c2TransitionsTotal,
c3TransitionsTotal,
clockInterruptsTotal,
dpcQueuedPerSecond,
dpcTimeSeconds,
idleBreakEventsTotal,
idleTimeSeconds,
interruptsTotal,
interruptTimeSeconds,
parkingStatus,
performanceLimitPercent,
priorityTimeSeconds,
privilegedTimeSeconds,
privilegedUtilitySeconds,
processorFrequencyMHz,
processorPerformance,
processorTimeSeconds,
processorUtilityRate,
userTimeSeconds,
})
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
c.logicalProcessors = prometheus.NewDesc(
@ -235,187 +218,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return nil
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
return c.collectFull(ctx, logger, ch)
}
func (c *Collector) collectFull(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
data := make([]perflibProcessorInformation, 0)
err := v1.UnmarshalObject(ctx.PerfObjects["Processor Information"], &data, logger)
if err != nil {
return err
}
var coreCount float64
for _, cpu := range data {
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
continue
}
core := cpu.Name
var (
counterProcessorRTCValues utils.Counter
counterProcessorMPerfValues utils.Counter
ok bool
)
if counterProcessorRTCValues, ok = c.processorRTCValues[core]; ok {
counterProcessorRTCValues.AddValue(uint32(cpu.ProcessorRTC))
} else {
counterProcessorRTCValues = utils.NewCounter(uint32(cpu.ProcessorRTC))
}
c.processorRTCValues[core] = counterProcessorRTCValues
if counterProcessorMPerfValues, ok = c.processorMPerfValues[core]; ok {
counterProcessorMPerfValues.AddValue(uint32(cpu.ProcessorMPerf))
} else {
counterProcessorMPerfValues = utils.NewCounter(uint32(cpu.ProcessorMPerf))
}
c.processorMPerfValues[core] = counterProcessorMPerfValues
coreCount++
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C1TimeSeconds,
core, "c1",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C2TimeSeconds,
core, "c2",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
cpu.C3TimeSeconds,
core, "c3",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
cpu.IdleTimeSeconds,
core, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
cpu.InterruptTimeSeconds,
core, "interrupt",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
cpu.DPCTimeSeconds,
core, "dpc",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
cpu.PrivilegedTimeSeconds,
core, "privileged",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
cpu.UserTimeSeconds,
core, "user",
)
ch <- prometheus.MustNewConstMetric(
c.interruptsTotal,
prometheus.CounterValue,
cpu.InterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.dpcsTotal,
prometheus.CounterValue,
cpu.DPCsQueuedTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.clockInterruptsTotal,
prometheus.CounterValue,
cpu.ClockInterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.idleBreakEventsTotal,
prometheus.CounterValue,
cpu.IdleBreakEventsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.parkingStatus,
prometheus.GaugeValue,
cpu.ParkingStatus,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorFrequencyMHz,
prometheus.GaugeValue,
cpu.ProcessorFrequencyMHz,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPerformance,
prometheus.CounterValue,
cpu.ProcessorPerformance,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorMPerf,
prometheus.CounterValue,
counterProcessorMPerfValues.Value(),
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorRTC,
prometheus.CounterValue,
counterProcessorRTCValues.Value(),
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorUtility,
prometheus.CounterValue,
cpu.ProcessorUtilityRate,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPrivilegedUtility,
prometheus.CounterValue,
cpu.PrivilegedUtilitySeconds,
core,
)
}
ch <- prometheus.MustNewConstMetric(
c.logicalProcessors,
prometheus.GaugeValue,
coreCount,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
data, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Processor Information metrics: %w", err)

View File

@ -6,7 +6,7 @@ import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cpu"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -56,11 +56,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -159,20 +155,7 @@ type miProcessor struct {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting cpu_info metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var dst []miProcessor
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package cpu_info_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cpu_info"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -53,11 +53,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -95,21 +91,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting cs metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
// Get systeminfo for number of processors
systemInfo := sysinfoapi.GetSystemInfo()

View File

@ -1,10 +1,12 @@
//go:build windows
package cs_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/cs"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package dfsr
const (
@ -40,62 +42,3 @@ const (
usnJournalRecordsAcceptedTotal = "USN Journal Records Accepted"
usnJournalUnreadPercentage = "USN Journal Records Unread Percentage"
)
// PerflibDFSRConnection Perflib: "DFS Replication Service Connections".
type PerflibDFSRConnection struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
}
// perflibDFSRFolder Perflib: "DFS Replicated Folder".
type perflibDFSRFolder struct {
Name string
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
ConflictBytesCleanedUpTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict folder Cleanups Completed"`
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
FilesReceivedTotal float64 `perflib:"Total Files Received"`
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
RDCNumberOfFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
}
// perflibDFSRVolume Perflib: "DFS Replication Service Volumes".
type perflibDFSRVolume struct {
Name string
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
DatabaseLookupsTotal float64 `perflib:"Database Lookups"`
USNJournalRecordsReadTotal float64 `perflib:"USN Journal Records Read"`
USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
}

View File

@ -12,8 +12,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -32,9 +30,9 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
perfDataCollectorConnection perfdata.Collector
perfDataCollectorFolder perfdata.Collector
perfDataCollectorVolume perfdata.Collector
perfDataCollectorConnection *perfdata.Collector
perfDataCollectorFolder *perfdata.Collector
perfDataCollectorVolume *perfdata.Collector
// connection source
connectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
@ -82,29 +80,6 @@ type Collector struct {
volumeUSNJournalUnreadPercentage *prometheus.Desc
volumeUSNJournalRecordsAcceptedTotal *prometheus.Desc
volumeUSNJournalRecordsReadTotal *prometheus.Desc
// Map of child Collector functions used during collection
dfsrChildCollectors []dfsrCollectorFunc
}
type dfsrCollectorFunc func(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error
// Map Perflib sources to DFSR Collector names
// e.g., volume -> DFS Replication Service Volumes.
func dfsrGetPerfObjectName(collector string) string {
prefix := "DFS "
suffix := ""
switch collector {
case "connection":
suffix = "Replication Connections"
case "folder":
suffix = "Replicated Folders"
case "volume":
suffix = "Replication Service Volumes"
}
return prefix + suffix
}
func New(config *Config) *Collector {
@ -147,35 +122,17 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
func (c *Collector) Close() error {
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection.Close()
}
// Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
perflibDependencies := make([]string, 0, len(expandedChildCollectors))
for _, source := range expandedChildCollectors {
perflibDependencies = append(perflibDependencies, dfsrGetPerfObjectName(source))
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder.Close()
}
return perflibDependencies, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection.Close()
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder.Close()
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume.Close()
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume.Close()
}
return nil
@ -186,82 +143,72 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
//nolint:nestif
if toggle.IsPDHEnabled() {
var err error
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
counters := []string{
bandwidthSavingsUsingDFSReplicationTotal,
bytesReceivedTotal,
compressedSizeOfFilesReceivedTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
}
c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Connections", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
counters := []string{
bandwidthSavingsUsingDFSReplicationTotal,
compressedSizeOfFilesReceivedTotal,
conflictBytesCleanedUpTotal,
conflictBytesGeneratedTotal,
conflictFilesCleanedUpTotal,
conflictFilesGeneratedTotal,
conflictFolderCleanupsCompletedTotal,
conflictSpaceInUse,
deletedSpaceInUse,
deletedBytesCleanedUpTotal,
deletedBytesGeneratedTotal,
deletedFilesCleanedUpTotal,
deletedFilesGeneratedTotal,
fileInstallsRetriedTotal,
fileInstallsSucceededTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
stagingSpaceInUse,
stagingBytesCleanedUpTotal,
stagingBytesGeneratedTotal,
stagingFilesCleanedUpTotal,
stagingFilesGeneratedTotal,
updatesDroppedTotal,
}
c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V2, "DFS Replicated Folders", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
counters := []string{
databaseCommitsTotal,
databaseLookupsTotal,
usnJournalRecordsReadTotal,
usnJournalRecordsAcceptedTotal,
usnJournalUnreadPercentage,
}
c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Service Volumes", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection, err = perfdata.NewCollector("DFS Replication Connections", perfdata.InstanceAll, []string{
bandwidthSavingsUsingDFSReplicationTotal,
bytesReceivedTotal,
compressedSizeOfFilesReceivedTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
})
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder, err = perfdata.NewCollector("DFS Replicated Folders", perfdata.InstanceAll, []string{
bandwidthSavingsUsingDFSReplicationTotal,
compressedSizeOfFilesReceivedTotal,
conflictBytesCleanedUpTotal,
conflictBytesGeneratedTotal,
conflictFilesCleanedUpTotal,
conflictFilesGeneratedTotal,
conflictFolderCleanupsCompletedTotal,
conflictSpaceInUse,
deletedSpaceInUse,
deletedBytesCleanedUpTotal,
deletedBytesGeneratedTotal,
deletedFilesCleanedUpTotal,
deletedFilesGeneratedTotal,
fileInstallsRetriedTotal,
fileInstallsSucceededTotal,
filesReceivedTotal,
rdcBytesReceivedTotal,
rdcCompressedSizeOfFilesReceivedTotal,
rdcNumberOfFilesReceivedTotal,
rdcSizeOfFilesReceivedTotal,
sizeOfFilesReceivedTotal,
stagingSpaceInUse,
stagingBytesCleanedUpTotal,
stagingBytesGeneratedTotal,
stagingFilesCleanedUpTotal,
stagingFilesGeneratedTotal,
updatesDroppedTotal,
})
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume, err = perfdata.NewCollector("DFS Replication Service Volumes", perfdata.InstanceAll, []string{
databaseCommitsTotal,
databaseLookupsTotal,
usnJournalRecordsReadTotal,
usnJournalRecordsAcceptedTotal,
usnJournalUnreadPercentage,
})
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}
}
// connection
c.connectionBandwidthSavingsUsingDFSReplicationTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
@ -552,374 +499,12 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
// Perflib sources are dynamic, depending on the enabled child collectors
expandedChildCollectors := slices.Compact(c.config.CollectorsEnabled)
c.dfsrChildCollectors = c.getDFSRChildCollectors(expandedChildCollectors)
return nil
}
// Maps enabled child collectors names to their relevant collection function,
// for use in Collector.Collect().
func (c *Collector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCollectorFunc {
var dfsrCollectors []dfsrCollectorFunc
for _, collector := range enabledCollectors {
switch collector {
case "connection":
dfsrCollectors = append(dfsrCollectors, c.collectConnection)
case "folder":
dfsrCollectors = append(dfsrCollectors, c.collectFolder)
case "volume":
dfsrCollectors = append(dfsrCollectors, c.collectVolume)
}
}
return dfsrCollectors
}
// Collect implements the Collector interface.
// Sends metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
for _, fn := range c.dfsrChildCollectors {
err := fn(ctx, logger, ch)
if err != nil {
return err
}
}
return nil
}
func (c *Collector) collectConnection(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []PerflibDFSRConnection
if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replication Connections"], &dst, logger); err != nil {
return err
}
for _, connection := range dst {
ch <- prometheus.MustNewConstMetric(
c.connectionBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
connection.BandwidthSavingsUsingDFSReplicationTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionBytesReceivedTotal,
prometheus.CounterValue,
connection.BytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.CompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionFilesReceivedTotal,
prometheus.CounterValue,
connection.FilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCBytesReceivedTotal,
prometheus.CounterValue,
connection.RDCBytesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCCompressedSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCSizeOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue,
connection.RDCNumberOfFilesReceivedTotal,
connection.Name,
)
ch <- prometheus.MustNewConstMetric(
c.connectionSizeOfFilesReceivedTotal,
prometheus.CounterValue,
connection.SizeOfFilesReceivedTotal,
connection.Name,
)
}
return nil
}
func (c *Collector) collectFolder(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []perflibDFSRFolder
if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replicated Folders"], &dst, logger); err != nil {
return err
}
for _, folder := range dst {
ch <- prometheus.MustNewConstMetric(
c.folderBandwidthSavingsUsingDFSReplicationTotal,
prometheus.CounterValue,
folder.BandwidthSavingsUsingDFSReplicationTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.CompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictBytesCleanedUpTotal,
prometheus.CounterValue,
folder.ConflictBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictBytesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFilesCleanedUpTotal,
prometheus.CounterValue,
folder.ConflictFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFilesGeneratedTotal,
prometheus.CounterValue,
folder.ConflictFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictFolderCleanupsCompletedTotal,
prometheus.CounterValue,
folder.ConflictFolderCleanupsCompletedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderConflictSpaceInUse,
prometheus.GaugeValue,
folder.ConflictSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedSpaceInUse,
prometheus.GaugeValue,
folder.DeletedSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedBytesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedBytesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedFilesCleanedUpTotal,
prometheus.CounterValue,
folder.DeletedFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderDeletedFilesGeneratedTotal,
prometheus.CounterValue,
folder.DeletedFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFileInstallsRetriedTotal,
prometheus.CounterValue,
folder.FileInstallsRetriedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFileInstallsSucceededTotal,
prometheus.CounterValue,
folder.FileInstallsSucceededTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderFilesReceivedTotal,
prometheus.CounterValue,
folder.FilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCBytesReceivedTotal,
prometheus.CounterValue,
folder.RDCBytesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCCompressedSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCCompressedSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCNumberOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCNumberOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderRDCSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.RDCSizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderSizeOfFilesReceivedTotal,
prometheus.CounterValue,
folder.SizeOfFilesReceivedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingSpaceInUse,
prometheus.GaugeValue,
folder.StagingSpaceInUse,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingBytesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingBytesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingBytesGeneratedTotal,
prometheus.CounterValue,
folder.StagingBytesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingFilesCleanedUpTotal,
prometheus.CounterValue,
folder.StagingFilesCleanedUpTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderStagingFilesGeneratedTotal,
prometheus.CounterValue,
folder.StagingFilesGeneratedTotal,
folder.Name,
)
ch <- prometheus.MustNewConstMetric(
c.folderUpdatesDroppedTotal,
prometheus.CounterValue,
folder.UpdatesDroppedTotal,
folder.Name,
)
}
return nil
}
func (c *Collector) collectVolume(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []perflibDFSRVolume
if err := v1.UnmarshalObject(ctx.PerfObjects["DFS Replication Service Volumes"], &dst, logger); err != nil {
return err
}
for _, volume := range dst {
ch <- prometheus.MustNewConstMetric(
c.volumeDatabaseLookupsTotal,
prometheus.CounterValue,
volume.DatabaseLookupsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeDatabaseCommitsTotal,
prometheus.CounterValue,
volume.DatabaseCommitsTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalRecordsAcceptedTotal,
prometheus.CounterValue,
volume.USNJournalRecordsAcceptedTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalRecordsReadTotal,
prometheus.CounterValue,
volume.USNJournalRecordsReadTotal,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.volumeUSNJournalUnreadPercentage,
prometheus.GaugeValue,
volume.USNJournalUnreadPercentage,
volume.Name,
)
}
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 3)
if slices.Contains(c.config.CollectorsEnabled, "connection") {

View File

@ -1,10 +1,12 @@
//go:build windows
package dfsr_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/dfsr"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package dhcp
const (
@ -27,34 +29,3 @@ const (
releasesTotal = "Releases/sec"
requestsTotal = "Requests/sec"
)
// represents perflib metrics from the DHCP Server class.
// While the names of a number of perflib metrics would indicate a rate is being returned (e.g. Packets Received/sec),
// perflib instead returns a counter, hence the "Total" suffix in some of the variable names.
type dhcpPerf struct {
AcksTotal float64 `perflib:"Acks/sec"`
ActiveQueueLength float64 `perflib:"Active Queue Length"`
ConflictCheckQueueLength float64 `perflib:"Conflict Check Queue Length"`
DeclinesTotal float64 `perflib:"Declines/sec"`
DeniedDueToMatch float64 `perflib:"Denied due to match."`
DeniedDueToNonMatch float64 `perflib:"Denied due to NON match."`
DiscoversTotal float64 `perflib:"Discovers/sec"`
DuplicatesDroppedTotal float64 `perflib:"Duplicates Dropped/sec"`
FailoverBndAckReceivedTotal float64 `perflib:"Failover: BndAck received/sec."`
FailoverBndAckSentTotal float64 `perflib:"Failover: BndAck sent/sec."`
FailoverBndUpdDropped float64 `perflib:"Failover: BndUpd Dropped."`
FailoverBndUpdPendingOutboundQueue float64 `perflib:"Failover: BndUpd pending in outbound queue."`
FailoverBndUpdReceivedTotal float64 `perflib:"Failover: BndUpd received/sec."`
FailoverBndUpdSentTotal float64 `perflib:"Failover: BndUpd sent/sec."`
FailoverTransitionsCommunicationInterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
FailoverTransitionsPartnerDownState float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
FailoverTransitionsRecoverState float64 `perflib:"Failover: Transitions to RECOVER state."`
InformsTotal float64 `perflib:"Informs/sec"`
NacksTotal float64 `perflib:"Nacks/sec"`
OfferQueueLength float64 `perflib:"Offer Queue Length"`
OffersTotal float64 `perflib:"Offers/sec"`
PacketsExpiredTotal float64 `perflib:"Packets Expired/sec"`
PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
ReleasesTotal float64 `perflib:"Releases/sec"`
RequestsTotal float64 `perflib:"Requests/sec"`
}

View File

@ -10,9 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -27,7 +24,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
acksTotal *prometheus.Desc
activeQueueLength *prometheus.Desc
@ -76,56 +73,44 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"DHCP Server"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DHCP Server", perfdata.AllInstances, []string{
acksTotal,
activeQueueLength,
conflictCheckQueueLength,
declinesTotal,
deniedDueToMatch,
deniedDueToNonMatch,
discoversTotal,
duplicatesDroppedTotal,
failoverBndAckReceivedTotal,
failoverBndAckSentTotal,
failoverBndUpdDropped,
failoverBndUpdPendingOutboundQueue,
failoverBndUpdReceivedTotal,
failoverBndUpdSentTotal,
failoverTransitionsCommunicationInterruptedState,
failoverTransitionsPartnerDownState,
failoverTransitionsRecoverState,
informsTotal,
nacksTotal,
offerQueueLength,
offersTotal,
packetsExpiredTotal,
packetsReceivedTotal,
releasesTotal,
requestsTotal,
})
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("DHCP Server", perfdata.InstanceAll, []string{
acksTotal,
activeQueueLength,
conflictCheckQueueLength,
declinesTotal,
deniedDueToMatch,
deniedDueToNonMatch,
discoversTotal,
duplicatesDroppedTotal,
failoverBndAckReceivedTotal,
failoverBndAckSentTotal,
failoverBndUpdDropped,
failoverBndUpdPendingOutboundQueue,
failoverBndUpdReceivedTotal,
failoverBndUpdSentTotal,
failoverTransitionsCommunicationInterruptedState,
failoverTransitionsPartnerDownState,
failoverTransitionsRecoverState,
informsTotal,
nacksTotal,
offerQueueLength,
offersTotal,
packetsExpiredTotal,
packetsReceivedTotal,
releasesTotal,
requestsTotal,
})
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
c.packetsReceivedTotal = prometheus.NewDesc(
@ -282,183 +267,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
return nil
}
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
return c.collect(ctx, logger, ch)
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dhcpPerfs []dhcpPerf
if err := v1.UnmarshalObject(ctx.PerfObjects["DHCP Server"], &dhcpPerfs, logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].PacketsReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.duplicatesDroppedTotal,
prometheus.CounterValue,
dhcpPerfs[0].DuplicatesDroppedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.packetsExpiredTotal,
prometheus.CounterValue,
dhcpPerfs[0].PacketsExpiredTotal,
)
ch <- prometheus.MustNewConstMetric(
c.activeQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].ActiveQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.conflictCheckQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].ConflictCheckQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.discoversTotal,
prometheus.CounterValue,
dhcpPerfs[0].DiscoversTotal,
)
ch <- prometheus.MustNewConstMetric(
c.offersTotal,
prometheus.CounterValue,
dhcpPerfs[0].OffersTotal,
)
ch <- prometheus.MustNewConstMetric(
c.requestsTotal,
prometheus.CounterValue,
dhcpPerfs[0].RequestsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.informsTotal,
prometheus.CounterValue,
dhcpPerfs[0].InformsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.acksTotal,
prometheus.CounterValue,
dhcpPerfs[0].AcksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.nACKsTotal,
prometheus.CounterValue,
dhcpPerfs[0].NacksTotal,
)
ch <- prometheus.MustNewConstMetric(
c.declinesTotal,
prometheus.CounterValue,
dhcpPerfs[0].DeclinesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.releasesTotal,
prometheus.CounterValue,
dhcpPerfs[0].ReleasesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.offerQueueLength,
prometheus.GaugeValue,
dhcpPerfs[0].OfferQueueLength,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToMatch,
prometheus.CounterValue,
dhcpPerfs[0].DeniedDueToMatch,
)
ch <- prometheus.MustNewConstMetric(
c.deniedDueToNonMatch,
prometheus.CounterValue,
dhcpPerfs[0].DeniedDueToNonMatch,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdSentTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndUpdSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndUpdReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckSentTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndAckSentTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndAckReceivedTotal,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndAckReceivedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdPendingOutboundQueue,
prometheus.GaugeValue,
dhcpPerfs[0].FailoverBndUpdPendingOutboundQueue,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsCommunicationInterruptedState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsCommunicationInterruptedState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsPartnerDownState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsPartnerDownState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverTransitionsRecoverState,
prometheus.CounterValue,
dhcpPerfs[0].FailoverTransitionsRecoverState,
)
ch <- prometheus.MustNewConstMetric(
c.failoverBndUpdDropped,
prometheus.CounterValue,
dhcpPerfs[0].FailoverBndUpdDropped,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
}
data, ok := perfData[perftypes.EmptyInstance]
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return errors.New("perflib query for DHCP Server returned empty result set")
}

View File

@ -1,10 +1,12 @@
//go:build windows
package dhcp_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/dhcp"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -53,11 +53,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -113,7 +109,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
return nil
}
type win32_DiskDrive struct {
type diskDrive struct {
DeviceID string `mi:"DeviceID"`
Model string `mi:"Model"`
Size uint64 `mi:"Size"`
@ -166,21 +162,8 @@ var (
)
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting disk_drive_info metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
var dst []win32_DiskDrive
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var dst []diskDrive
if err := c.miSession.Query(&dst, mi.NamespaceRootCIMv2, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
@ -190,14 +173,16 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
}
for _, disk := range dst {
distName := strings.Trim(disk.Name, `\.`)
ch <- prometheus.MustNewConstMetric(
c.diskInfo,
prometheus.GaugeValue,
1.0,
strings.Trim(disk.DeviceID, "\\.\\"), //nolint:staticcheck
strings.Trim(disk.DeviceID, `\.`),
strings.TrimRight(disk.Model, " "),
strings.TrimRight(disk.Caption, " "),
strings.TrimRight(disk.Name, "\\.\\"), //nolint:staticcheck
distName,
)
for _, status := range allDiskStatus {
@ -210,7 +195,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
c.status,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
distName,
status,
)
}
@ -219,14 +204,14 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
c.size,
prometheus.GaugeValue,
float64(disk.Size),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
distName,
)
ch <- prometheus.MustNewConstMetric(
c.partitions,
prometheus.GaugeValue,
float64(disk.Partitions),
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
distName,
)
for availNum, val := range availMap {
@ -238,7 +223,7 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
c.availability,
prometheus.GaugeValue,
isCurrentState,
strings.Trim(disk.Name, "\\.\\"), //nolint:staticcheck
distName,
val,
)
}

View File

@ -1,10 +1,12 @@
//go:build windows
package diskdrive_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/diskdrive"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package dns
const (

View File

@ -10,7 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -25,7 +24,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
dynamicUpdatesFailures *prometheus.Desc
dynamicUpdatesQueued *prometheus.Desc
@ -71,11 +70,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
@ -84,7 +79,7 @@ func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DNS", perfdata.AllInstances, []string{
c.perfDataCollector, err = perfdata.NewCollector("DNS", perfdata.InstanceAll, []string{
axfrRequestReceived,
axfrRequestSent,
axfrResponseReceived,
@ -268,13 +263,13 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect DNS metrics: %w", err)
}
data, ok := perfData[perftypes.EmptyInstance]
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return errors.New("perflib query for DNS returned empty result set")
}

View File

@ -1,10 +1,12 @@
//go:build windows
package dns_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/dns"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -12,7 +12,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -54,16 +53,16 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
perfDataCollectorADAccessProcesses perfdata.Collector
perfDataCollectorTransportQueues perfdata.Collector
perfDataCollectorHttpProxy perfdata.Collector
perfDataCollectorActiveSync perfdata.Collector
perfDataCollectorAvailabilityService perfdata.Collector
perfDataCollectorOWA perfdata.Collector
perfDataCollectorAutoDiscover perfdata.Collector
perfDataCollectorWorkloadManagementWorkloads perfdata.Collector
perfDataCollectorRpcClientAccess perfdata.Collector
perfDataCollectorMapiHttpEmsmdb perfdata.Collector
perfDataCollectorADAccessProcesses *perfdata.Collector
perfDataCollectorTransportQueues *perfdata.Collector
perfDataCollectorHttpProxy *perfdata.Collector
perfDataCollectorActiveSync *perfdata.Collector
perfDataCollectorAvailabilityService *perfdata.Collector
perfDataCollectorOWA *perfdata.Collector
perfDataCollectorAutoDiscover *perfdata.Collector
perfDataCollectorWorkloadManagementWorkloads *perfdata.Collector
perfDataCollectorRpcClientAccess *perfdata.Collector
perfDataCollectorMapiHttpEmsmdb *perfdata.Collector
activeMailboxDeliveryQueueLength *prometheus.Desc
activeSyncRequestsPerSec *prometheus.Desc
@ -184,48 +183,27 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{
"MSExchange ADAccess Processes",
"MSExchangeTransport Queues",
"MSExchange HttpProxy",
"MSExchange ActiveSync",
"MSExchange Availability Service",
"MSExchange OWA",
"MSExchangeAutodiscover",
"MSExchange WorkloadManagement Workloads",
"MSExchange RpcClientAccess",
"MSExchange MapiHttp Emsmdb",
}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
collectorFuncs := map[string]func() error{
adAccessProcesses: c.buildADAccessProcesses,
transportQueues: c.buildTransportQueues,
httpProxy: c.buildHTTPProxy,
activeSync: c.buildActiveSync,
availabilityService: c.buildAvailabilityService,
outlookWebAccess: c.buildOWA,
autoDiscover: c.buildAutoDiscover,
workloadManagement: c.buildWorkloadManagementWorkloads,
rpcClientAccess: c.buildRPC,
mapiHttpEmsmdb: c.buildMapiHttpEmsmdb,
}
collectorFuncs := map[string]func() error{
adAccessProcesses: c.buildADAccessProcesses,
transportQueues: c.buildTransportQueues,
httpProxy: c.buildHTTPProxy,
activeSync: c.buildActiveSync,
availabilityService: c.buildAvailabilityService,
outlookWebAccess: c.buildOWA,
autoDiscover: c.buildAutoDiscover,
workloadManagement: c.buildWorkloadManagementWorkloads,
rpcClientAccess: c.buildRPC,
mapiHttpEmsmdb: c.buildMapiHttpEmsmdb,
}
for _, collectorName := range c.config.CollectorsEnabled {
if err := collectorFuncs[collectorName](); err != nil {
return err
}
for _, collectorName := range c.config.CollectorsEnabled {
if err := collectorFuncs[collectorName](); err != nil {
return err
}
}
@ -282,13 +260,8 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
}
// Collect collects exchange metrics and sends them to prometheus.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
if toggle.IsPDHEnabled() {
return c.collectPDH(ch)
}
logger = logger.With(slog.String("collector", Name))
collectorFuncs := map[string]func(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error{
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ch chan<- prometheus.Metric) error{
adAccessProcesses: c.collectADAccessProcesses,
transportQueues: c.collectTransportQueues,
httpProxy: c.collectHTTPProxy,
@ -301,34 +274,6 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
mapiHttpEmsmdb: c.collectMapiHttpEmsmdb,
}
for _, collectorName := range c.config.CollectorsEnabled {
if err := collectorFuncs[collectorName](ctx, logger, ch); err != nil {
logger.Error("Error in "+collectorName,
slog.Any("err", err),
)
return err
}
}
return nil
}
// Collect collects exchange metrics and sends them to prometheus.
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
collectorFuncs := map[string]func(ch chan<- prometheus.Metric) error{
adAccessProcesses: c.collectPDHADAccessProcesses,
transportQueues: c.collectPDHTransportQueues,
httpProxy: c.collectPDHHTTPProxy,
activeSync: c.collectPDHActiveSync,
availabilityService: c.collectPDHAvailabilityService,
outlookWebAccess: c.collectPDHOWA,
autoDiscover: c.collectPDHAutoDiscover,
workloadManagement: c.collectPDHWorkloadManagementWorkloads,
rpcClientAccess: c.collectPDHRPC,
mapiHttpEmsmdb: c.collectPDHMapiHttpEmsmdb,
}
errs := make([]error, len(c.config.CollectorsEnabled))
for i, collectorName := range c.config.CollectorsEnabled {

View File

@ -1,13 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -17,13 +16,6 @@ const (
syncCommandsPerSec = "Sync Commands/sec"
)
// Perflib: [25138] MSExchange ActiveSync.
type perflibActiveSync struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
}
func (c *Collector) buildActiveSync() error {
counters := []string{
requestsPerSec,
@ -33,7 +25,7 @@ func (c *Collector) buildActiveSync() error {
var err error
c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V2, "MSExchange ActiveSync", perfdata.AllInstances, counters)
c.perfDataCollectorActiveSync, err = perfdata.NewCollector("MSExchange ActiveSync", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
}
@ -41,35 +33,7 @@ func (c *Collector) buildActiveSync() error {
return nil
}
func (c *Collector) collectActiveSync(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibActiveSync
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange ActiveSync"], &data, logger); err != nil {
return err
}
for _, instance := range data {
ch <- prometheus.MustNewConstMetric(
c.activeSyncRequestsPerSec,
prometheus.CounterValue,
instance.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.pingCommandsPending,
prometheus.GaugeValue,
instance.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.syncCommandsPerSec,
prometheus.CounterValue,
instance.SyncCommandsPerSec,
)
}
return nil
}
func (c *Collector) collectPDHActiveSync(ch chan<- prometheus.Metric) error {
func (c *Collector) collectActiveSync(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorActiveSync.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange ActiveSync metrics: %w", err)

View File

@ -1,14 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -20,17 +18,6 @@ const (
longRunningLDAPOperationsPerMin = "Long Running LDAP Operations/min"
)
// Perflib: [19108] MSExchange ADAccess Processes.
type perflibADAccessProcesses struct {
Name string
LDAPReadTime float64 `perflib:"LDAP Read Time"`
LDAPSearchTime float64 `perflib:"LDAP Search Time"`
LDAPWriteTime float64 `perflib:"LDAP Write Time"`
LDAPTimeoutErrorsPerSec float64 `perflib:"LDAP Timeout Errors/sec"`
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
}
func (c *Collector) buildADAccessProcesses() error {
counters := []string{
ldapReadTime,
@ -42,7 +29,7 @@ func (c *Collector) buildADAccessProcesses() error {
var err error
c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V2, "MSExchange ADAccess Processes", perfdata.AllInstances, counters)
c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector("MSExchange ADAccess Processes", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
}
@ -50,63 +37,7 @@ func (c *Collector) buildADAccessProcesses() error {
return nil
}
func (c *Collector) collectADAccessProcesses(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibADAccessProcesses
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange ADAccess Processes"], &data, logger); err != nil {
return err
}
labelUseCount := make(map[string]int)
for _, proc := range data {
labelName := c.toLabelName(proc.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
// Since we're not including the PID suffix from the instance names in the label names, we get an occasional duplicate.
// This seems to affect about 4 instances only of this object.
labelUseCount[labelName]++
if labelUseCount[labelName] > 1 {
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
}
ch <- prometheus.MustNewConstMetric(
c.ldapReadTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapSearchTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapWriteTime,
prometheus.CounterValue,
c.msToSec(proc.LDAPWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapTimeoutErrorsPerSec,
prometheus.CounterValue,
proc.LDAPTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.longRunningLDAPOperationsPerMin,
prometheus.CounterValue,
proc.LongRunningLDAPOperationsPerMin*60,
labelName,
)
}
return nil
}
func (c *Collector) collectPDHADAccessProcesses(ch chan<- prometheus.Metric) error {
func (c *Collector) collectADAccessProcesses(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorADAccessProcesses.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange ADAccess Processes metrics: %w", err)

View File

@ -1,21 +1,15 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// [29240] MSExchangeAutodiscover.
type perflibAutodiscover struct {
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *Collector) buildAutoDiscover() error {
counters := []string{
requestsPerSec,
@ -23,7 +17,7 @@ func (c *Collector) buildAutoDiscover() error {
var err error
c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V2, "MSExchange Autodiscover", perfdata.AllInstances, counters)
c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector("MSExchange Autodiscover", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
}
@ -31,25 +25,7 @@ func (c *Collector) buildAutoDiscover() error {
return nil
}
func (c *Collector) collectAutoDiscover(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibAutodiscover
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchangeAutodiscover"], &data, logger); err != nil {
return err
}
for _, autodisc := range data {
ch <- prometheus.MustNewConstMetric(
c.autoDiscoverRequestsPerSec,
prometheus.CounterValue,
autodisc.RequestsPerSec,
)
}
return nil
}
func (c *Collector) collectPDHAutoDiscover(ch chan<- prometheus.Metric) error {
func (c *Collector) collectAutoDiscover(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorAutoDiscover.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange Autodiscover metrics: %w", err)

View File

@ -1,27 +1,21 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
// Perflib: [24914] MSExchange Availability Service.
type perflibAvailabilityService struct {
RequestsSec float64 `perflib:"Availability Requests (sec)"`
}
func (c *Collector) buildAvailabilityService() error {
counters := []string{}
var err error
c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V2, "MSExchange Availability Service", perfdata.AllInstances, counters)
c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector("MSExchange Availability Service", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
}
@ -29,25 +23,7 @@ func (c *Collector) buildAvailabilityService() error {
return nil
}
func (c *Collector) collectAvailabilityService(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibAvailabilityService
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange Availability Service"], &data, logger); err != nil {
return err
}
for _, availservice := range data {
ch <- prometheus.MustNewConstMetric(
c.availabilityRequestsSec,
prometheus.CounterValue,
availservice.RequestsSec,
)
}
return nil
}
func (c *Collector) collectPDHAvailabilityService(ch chan<- prometheus.Metric) error {
func (c *Collector) collectAvailabilityService(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorAvailabilityService.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange Availability Service metrics: %w", err)

View File

@ -1,13 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -20,18 +19,6 @@ const (
proxyRequestsPerSec = "Proxy Requests/Sec"
)
// Perflib: [36934] MSExchange HttpProxy.
type perflibHTTPProxy struct {
Name string
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
}
func (c *Collector) buildHTTPProxy() error {
counters := []string{
mailboxServerLocatorAverageLatency,
@ -44,7 +31,7 @@ func (c *Collector) buildHTTPProxy() error {
var err error
c.perfDataCollectorHttpProxy, err = perfdata.NewCollector(perfdata.V2, "MSExchange HttpProxy", perfdata.AllInstances, counters)
c.perfDataCollectorHttpProxy, err = perfdata.NewCollector("MSExchange HttpProxy", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
}
@ -52,57 +39,7 @@ func (c *Collector) buildHTTPProxy() error {
return nil
}
func (c *Collector) collectHTTPProxy(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibHTTPProxy
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange HttpProxy"], &data, logger); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
ch <- prometheus.MustNewConstMetric(
c.mailboxServerLocatorAverageLatency,
prometheus.GaugeValue,
c.msToSec(instance.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageAuthenticationLatency,
prometheus.GaugeValue,
instance.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageCASProcessingLatency,
prometheus.GaugeValue,
c.msToSec(instance.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.mailboxServerProxyFailureRate,
prometheus.GaugeValue,
instance.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.outstandingProxyRequests,
prometheus.GaugeValue,
instance.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.proxyRequestsPerSec,
prometheus.CounterValue,
instance.ProxyRequestsPerSec,
labelName,
)
}
return nil
}
func (c *Collector) collectPDHHTTPProxy(ch chan<- prometheus.Metric) error {
func (c *Collector) collectHTTPProxy(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorHttpProxy.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange HttpProxy Service metrics: %w", err)

View File

@ -1,13 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -15,11 +14,6 @@ const (
activeUserCount = "Active User Count"
)
// perflib [26463] MSExchange MapiHttp Emsmdb.
type perflibMapiHttpEmsmdb struct {
ActiveUserCount float64 `perflib:"Active User Count"`
}
func (c *Collector) buildMapiHttpEmsmdb() error {
counters := []string{
activeUserCount,
@ -27,7 +21,7 @@ func (c *Collector) buildMapiHttpEmsmdb() error {
var err error
c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector(perfdata.V2, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counters)
c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector("MSExchange MapiHttp Emsmdb", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
}
@ -35,25 +29,7 @@ func (c *Collector) buildMapiHttpEmsmdb() error {
return nil
}
func (c *Collector) collectMapiHttpEmsmdb(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibMapiHttpEmsmdb
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange MapiHttp Emsmdb"], &data, logger); err != nil {
return err
}
for _, mapihttp := range data {
ch <- prometheus.MustNewConstMetric(
c.activeUserCountMapiHttpEmsMDB,
prometheus.GaugeValue,
mapihttp.ActiveUserCount,
)
}
return nil
}
func (c *Collector) collectPDHMapiHttpEmsmdb(ch chan<- prometheus.Metric) error {
func (c *Collector) collectMapiHttpEmsmdb(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorMapiHttpEmsmdb.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange MapiHttp Emsmdb metrics: %w", err)

View File

@ -1,13 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -16,12 +15,6 @@ const (
// requestsPerSec = "Requests/sec"
)
// Perflib: [24618] MSExchange OWA.
type perflibOWA struct {
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
RequestsPerSec float64 `perflib:"Requests/sec"`
}
func (c *Collector) buildOWA() error {
counters := []string{
currentUniqueUsers,
@ -30,7 +23,7 @@ func (c *Collector) buildOWA() error {
var err error
c.perfDataCollectorOWA, err = perfdata.NewCollector(perfdata.V2, "MSExchange OWA", perfdata.AllInstances, counters)
c.perfDataCollectorOWA, err = perfdata.NewCollector("MSExchange OWA", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
}
@ -38,30 +31,7 @@ func (c *Collector) buildOWA() error {
return nil
}
func (c *Collector) collectOWA(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibOWA
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange OWA"], &data, logger); err != nil {
return err
}
for _, owa := range data {
ch <- prometheus.MustNewConstMetric(
c.currentUniqueUsers,
prometheus.GaugeValue,
owa.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.owaRequestsPerSec,
prometheus.CounterValue,
owa.RequestsPerSec,
)
}
return nil
}
func (c *Collector) collectPDHOWA(ch chan<- prometheus.Metric) error {
func (c *Collector) collectOWA(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorOWA.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange OWA metrics: %w", err)

View File

@ -1,13 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -20,16 +19,6 @@ const (
userCount = "User Count"
)
// Perflib: [29366] MSExchange RpcClientAccess.
type perflibRPCClientAccess struct {
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
RPCRequests float64 `perflib:"RPC Requests"`
ActiveUserCount float64 `perflib:"Active User Count"`
ConnectionCount float64 `perflib:"Connection Count"`
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
UserCount float64 `perflib:"User Count"`
}
func (c *Collector) buildRPC() error {
counters := []string{
rpcAveragedLatency,
@ -42,7 +31,7 @@ func (c *Collector) buildRPC() error {
var err error
c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector(perfdata.V2, "MSExchange RpcClientAccess", perfdata.AllInstances, counters)
c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector("MSExchange RpcClientAccess", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
}
@ -50,50 +39,7 @@ func (c *Collector) buildRPC() error {
return nil
}
func (c *Collector) collectRPC(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibRPCClientAccess
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange RpcClientAccess"], &data, logger); err != nil {
return err
}
for _, rpc := range data {
ch <- prometheus.MustNewConstMetric(
c.rpcAveragedLatency,
prometheus.GaugeValue,
c.msToSec(rpc.RPCAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.rpcRequests,
prometheus.GaugeValue,
rpc.RPCRequests,
)
ch <- prometheus.MustNewConstMetric(
c.activeUserCount,
prometheus.GaugeValue,
rpc.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.connectionCount,
prometheus.GaugeValue,
rpc.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.rpcOperationsPerSec,
prometheus.CounterValue,
rpc.RPCOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.userCount,
prometheus.GaugeValue,
rpc.UserCount,
)
}
return nil
}
func (c *Collector) collectPDHRPC(ch chan<- prometheus.Metric) error {
func (c *Collector) collectRPC(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorRpcClientAccess.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange RpcClientAccess: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package exchange_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/exchange"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,14 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -23,20 +21,6 @@ const (
poisonQueueLength = "Poison Queue Length"
)
// Perflib: [20524] MSExchangeTransport Queues.
type perflibTransportQueues struct {
Name string
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
}
func (c *Collector) buildTransportQueues() error {
counters := []string{
externalActiveRemoteDeliveryQueueLength,
@ -51,7 +35,7 @@ func (c *Collector) buildTransportQueues() error {
var err error
c.perfDataCollectorTransportQueues, err = perfdata.NewCollector(perfdata.V2, "MSExchangeTransport Queues", perfdata.AllInstances, counters)
c.perfDataCollectorTransportQueues, err = perfdata.NewCollector("MSExchangeTransport Queues", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
}
@ -59,72 +43,7 @@ func (c *Collector) buildTransportQueues() error {
return nil
}
func (c *Collector) collectTransportQueues(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibTransportQueues
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchangeTransport Queues"], &data, logger); err != nil {
return err
}
for _, queue := range data {
labelName := c.toLabelName(queue.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.externalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.activeMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.retryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
queue.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.unreachableQueueLength,
prometheus.GaugeValue,
queue.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.externalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalLargestDeliveryQueueLength,
prometheus.GaugeValue,
queue.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.poisonQueueLength,
prometheus.GaugeValue,
queue.PoisonQueueLength,
labelName,
)
}
return nil
}
func (c *Collector) collectPDHTransportQueues(ch chan<- prometheus.Metric) error {
func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorTransportQueues.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchangeTransport Queues: %w", err)

View File

@ -1,14 +1,12 @@
//go:build windows
package exchange
import (
"errors"
"fmt"
"log/slog"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -20,17 +18,6 @@ const (
isActive = "Active"
)
// Perflib: [19430] MSExchange WorkloadManagement Workloads.
type perflibWorkloadManagementWorkloads struct {
Name string
ActiveTasks float64 `perflib:"ActiveTasks"`
CompletedTasks float64 `perflib:"CompletedTasks"`
QueuedTasks float64 `perflib:"QueuedTasks"`
YieldedTasks float64 `perflib:"YieldedTasks"`
IsActive float64 `perflib:"Active"`
}
func (c *Collector) buildWorkloadManagementWorkloads() error {
counters := []string{
activeTasks,
@ -42,7 +29,7 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {
var err error
c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector(perfdata.V2, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counters)
c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector("MSExchange WorkloadManagement Workloads", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
}
@ -50,54 +37,7 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {
return nil
}
func (c *Collector) collectWorkloadManagementWorkloads(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var data []perflibWorkloadManagementWorkloads
if err := v1.UnmarshalObject(ctx.PerfObjects["MSExchange WorkloadManagement Workloads"], &data, logger); err != nil {
return err
}
for _, instance := range data {
labelName := c.toLabelName(instance.Name)
if strings.HasSuffix(labelName, "_total") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.activeTasks,
prometheus.GaugeValue,
instance.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.completedTasks,
prometheus.CounterValue,
instance.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.queuedTasks,
prometheus.CounterValue,
instance.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.yieldedTasks,
prometheus.CounterValue,
instance.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.isActive,
prometheus.GaugeValue,
instance.IsActive,
labelName,
)
}
return nil
}
func (c *Collector) collectPDHWorkloadManagementWorkloads(ch chan<- prometheus.Metric) error {
func (c *Collector) collectWorkloadManagementWorkloads(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorWorkloadManagementWorkloads.Collect()
if err != nil {
return fmt.Errorf("failed to collect MSExchange WorkloadManagement Workloads: %w", err)

View File

@ -31,6 +31,7 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
logger *slog.Logger
fileMTime *prometheus.Desc
}
@ -77,18 +78,14 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Info("filetime collector is in an experimental state! It may subject to change.",
slog.String("collector", Name),
)
c.logger = logger.With(slog.String("collector", Name))
c.logger.Info("filetime collector is in an experimental state! It may subject to change.")
c.fileMTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mtime_timestamp_seconds"),
@ -111,14 +108,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
return c.collectGlob(logger, ch)
}
// collectWin32 collects file times for each file path in the config. It using Win32 FindFirstFile and FindNextFile.
func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
wg := sync.WaitGroup{}
for _, filePattern := range c.config.FilePatterns {
@ -127,8 +117,8 @@ func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric
go func(filePattern string) {
defer wg.Done()
if err := c.collectGlobFilePath(logger, ch, filePattern); err != nil {
logger.Error("failed collecting metrics for filepath",
if err := c.collectGlobFilePath(ch, filePattern); err != nil {
c.logger.Error("failed collecting metrics for filepath",
slog.String("filepath", filePattern),
slog.Any("err", err),
)
@ -141,7 +131,7 @@ func (c *Collector) collectGlob(logger *slog.Logger, ch chan<- prometheus.Metric
return nil
}
func (c *Collector) collectGlobFilePath(logger *slog.Logger, ch chan<- prometheus.Metric, filePattern string) error {
func (c *Collector) collectGlobFilePath(ch chan<- prometheus.Metric, filePattern string) error {
basePath, pattern := doublestar.SplitPattern(filePattern)
basePathFS := os.DirFS(basePath)
@ -155,7 +145,7 @@ func (c *Collector) collectGlobFilePath(logger *slog.Logger, ch chan<- prometheu
fileInfo, err := os.Stat(filePath)
if err != nil {
logger.Warn("failed to state file",
c.logger.Warn("failed to state file",
slog.String("file", filePath),
slog.Any("err", err),
)
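The filetime collector resolves each configured pattern with doublestar against an os.DirFS rooted at the pattern's static prefix and stats every match. A condensed, illustrative version of that flow (descriptor wiring omitted; the single file label and second-precision timestamp are simplifications, not taken from this diff):
//go:build windows
package filetime
import (
	"os"
	"path/filepath"
	"github.com/bmatcuk/doublestar/v4"
	"github.com/prometheus/client_golang/prometheus"
)
// collectPatternSketch is an illustrative condensation of the glob walk above:
// split the pattern, glob the base directory, and emit one mtime gauge per
// matching file. mtimeDesc stands in for c.fileMTime and is assumed to declare
// a single variable label for the file path.
func collectPatternSketch(ch chan<- prometheus.Metric, mtimeDesc *prometheus.Desc, filePattern string) error {
	basePath, pattern := doublestar.SplitPattern(filePattern)
	matches, err := doublestar.Glob(os.DirFS(basePath), pattern)
	if err != nil {
		return err
	}
	for _, match := range matches {
		filePath := filepath.Join(basePath, match)
		fileInfo, err := os.Stat(filePath)
		if err != nil {
			continue // skip files that vanished between glob and stat
		}
		ch <- prometheus.MustNewConstMetric(
			mtimeDesc,
			prometheus.GaugeValue,
			float64(fileInfo.ModTime().UTC().Unix()),
			filePath,
		)
	}
	return nil
}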

View File

@ -1,10 +1,12 @@
//go:build windows
package filetime_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -57,11 +57,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -136,21 +132,6 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting fsrmquota metrics",
slog.Any("err", err),
)
return err
}
return nil
}
// MSFT_FSRMQuota docs:
// https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
type MSFT_FSRMQuota struct {
@ -168,7 +149,9 @@ type MSFT_FSRMQuota struct {
SoftLimit bool `mi:"SoftLimit"`
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var dst []MSFT_FSRMQuota
if err := c.miSession.Query(&dst, mi.NamespaceRootWindowsFSRM, c.miQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package fsrmquota_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/fsrmquota"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -10,7 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils"
"github.com/prometheus/client_golang/prometheus"
)
@ -161,11 +160,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -179,13 +174,13 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
buildSubsystemName := func(component string) string { return "hyperv_" + component }
c.healthCritical = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("health"), "critical"),
prometheus.BuildFQName("windows", buildSubsystemName("health"), "critical"),
"This counter represents the number of virtual machines with critical health",
nil,
nil,
)
c.healthOk = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("health"), "ok"),
prometheus.BuildFQName("windows", buildSubsystemName("health"), "ok"),
"This counter represents the number of virtual machines with ok health",
nil,
nil,
@ -194,19 +189,19 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.physicalPagesAllocated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vid"), "physical_pages_allocated"),
prometheus.BuildFQName("windows", buildSubsystemName("vid"), "physical_pages_allocated"),
"The number of physical pages allocated",
[]string{"vm"},
nil,
)
c.preferredNUMANodeIndex = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vid"), "preferred_numa_node_index"),
prometheus.BuildFQName("windows", buildSubsystemName("vid"), "preferred_numa_node_index"),
"The preferred NUMA node index associated with this partition",
[]string{"vm"},
nil,
)
c.remotePhysicalPages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vid"), "remote_physical_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("vid"), "remote_physical_pages"),
"The number of physical pages not allocated from the preferred NUMA node",
[]string{"vm"},
nil,
@ -215,127 +210,127 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.addressSpaces = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "address_spaces"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "address_spaces"),
"The number of address spaces in the virtual TLB of the partition",
nil,
nil,
)
c.attachedDevices = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "attached_devices"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "attached_devices"),
"The number of devices attached to the partition",
nil,
nil,
)
c.depositedPages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "deposited_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "deposited_pages"),
"The number of pages deposited into the partition",
nil,
nil,
)
c.deviceDMAErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "device_dma_errors"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "device_dma_errors"),
"An indicator of illegal DMA requests generated by all devices assigned to the partition",
nil,
nil,
)
c.deviceInterruptErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "device_interrupt_errors"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "device_interrupt_errors"),
"An indicator of illegal interrupt requests generated by all devices assigned to the partition",
nil,
nil,
)
c.deviceInterruptMappings = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "device_interrupt_mappings"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "device_interrupt_mappings"),
"The number of device interrupt mappings used by the partition",
nil,
nil,
)
c.deviceInterruptThrottleEvents = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "device_interrupt_throttle_events"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "device_interrupt_throttle_events"),
"The number of times an interrupt from a device assigned to the partition was temporarily throttled because the device was generating too many interrupts",
nil,
nil,
)
c.gpaPages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "preferred_numa_node_index"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "preferred_numa_node_index"),
"The number of pages present in the GPA space of the partition (zero for root partition)",
nil,
nil,
)
c.gpaSpaceModifications = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "gpa_space_modifications"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "gpa_space_modifications"),
"The rate of modifications to the GPA space of the partition",
nil,
nil,
)
c.ioTLBFlushCost = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "io_tlb_flush_cost"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "io_tlb_flush_cost"),
"The average time (in nanoseconds) spent processing an I/O TLB flush",
nil,
nil,
)
c.ioTLBFlushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "io_tlb_flush"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "io_tlb_flush"),
"The rate of flushes of I/O TLBs of the partition",
nil,
nil,
)
c.recommendedVirtualTLBSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "recommended_virtual_tlb_size"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "recommended_virtual_tlb_size"),
"The recommended number of pages to be deposited for the virtual TLB",
nil,
nil,
)
c.skippedTimerTicks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "physical_pages_allocated"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "physical_pages_allocated"),
"The number of timer interrupts skipped for the partition",
nil,
nil,
)
c.value1Gdevicepages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "1G_device_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "1G_device_pages"),
"The number of 1G pages present in the device space of the partition",
nil,
nil,
)
c.value1GGPApages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "1G_gpa_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "1G_gpa_pages"),
"The number of 1G pages present in the GPA space of the partition",
nil,
nil,
)
c.value2Mdevicepages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "2M_device_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "2M_device_pages"),
"The number of 2M pages present in the device space of the partition",
nil,
nil,
)
c.value2MGPApages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "2M_gpa_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "2M_gpa_pages"),
"The number of 2M pages present in the GPA space of the partition",
nil,
nil,
)
c.value4Kdevicepages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "4K_device_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "4K_device_pages"),
"The number of 4K pages present in the device space of the partition",
nil,
nil,
)
c.value4KGPApages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "4K_gpa_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "4K_gpa_pages"),
"The number of 4K pages present in the GPA space of the partition",
nil,
nil,
)
c.virtualTLBFlushEntires = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "virtual_tlb_flush_entires"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "virtual_tlb_flush_entires"),
"The rate of flushes of the entire virtual TLB",
nil,
nil,
)
c.virtualTLBPages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("root_partition"), "virtual_tlb_pages"),
prometheus.BuildFQName("windows", buildSubsystemName("root_partition"), "virtual_tlb_pages"),
"The number of pages used by the virtual TLB of the partition",
nil,
nil,
@ -344,13 +339,13 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.virtualProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("hypervisor"), "virtual_processors"),
prometheus.BuildFQName("windows", buildSubsystemName("hypervisor"), "virtual_processors"),
"The number of virtual processors present in the system",
nil,
nil,
)
c.logicalProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("hypervisor"), "logical_processors"),
prometheus.BuildFQName("windows", buildSubsystemName("hypervisor"), "logical_processors"),
"The number of logical processors present in the system",
nil,
nil,
@ -359,19 +354,19 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.hostLPGuestRunTimePercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_lp"), "guest_run_time_percent"),
prometheus.BuildFQName("windows", buildSubsystemName("host_lp"), "guest_run_time_percent"),
"The percentage of time spent by the processor in guest code",
[]string{"core"},
nil,
)
c.hostLPHypervisorRunTimePercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_lp"), "hypervisor_run_time_percent"),
prometheus.BuildFQName("windows", buildSubsystemName("host_lp"), "hypervisor_run_time_percent"),
"The percentage of time spent by the processor in hypervisor code",
[]string{"core"},
nil,
)
c.hostLPTotalRunTimePercent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_lp"), "total_run_time_percent"),
prometheus.BuildFQName("windows", buildSubsystemName("host_lp"), "total_run_time_percent"),
"The percentage of time spent by the processor in guest and hypervisor code",
[]string{"core"},
nil,
@ -380,31 +375,31 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.hostGuestRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_cpu"), "guest_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("host_cpu"), "guest_run_time"),
"The time spent by the virtual processor in guest code",
[]string{"core"},
nil,
)
c.hostHypervisorRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_cpu"), "hypervisor_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("host_cpu"), "hypervisor_run_time"),
"The time spent by the virtual processor in hypervisor code",
[]string{"core"},
nil,
)
c.hostRemoteRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_cpu"), "remote_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("host_cpu"), "remote_run_time"),
"The time spent by the virtual processor running on a remote node",
[]string{"core"},
nil,
)
c.hostTotalRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_cpu"), "total_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("host_cpu"), "total_run_time"),
"The time spent by the virtual processor in guest and hypervisor code",
[]string{"core"},
nil,
)
c.hostCPUWaitTimePerDispatch = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("host_cpu"), "wait_time_per_dispatch_total"),
prometheus.BuildFQName("windows", buildSubsystemName("host_cpu"), "wait_time_per_dispatch_total"),
"Time in nanoseconds waiting for a virtual processor to be dispatched onto a logical processor",
[]string{"core"},
nil,
@ -413,31 +408,31 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.vmGuestRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_cpu"), "guest_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_cpu"), "guest_run_time"),
"The time spent by the virtual processor in guest code",
[]string{"vm", "core"},
nil,
)
c.vmHypervisorRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_cpu"), "hypervisor_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_cpu"), "hypervisor_run_time"),
"The time spent by the virtual processor in hypervisor code",
[]string{"vm", "core"},
nil,
)
c.vmRemoteRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_cpu"), "remote_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_cpu"), "remote_run_time"),
"The time spent by the virtual processor running on a remote node",
[]string{"vm", "core"},
nil,
)
c.vmTotalRunTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_cpu"), "total_run_time"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_cpu"), "total_run_time"),
"The time spent by the virtual processor in guest and hypervisor code",
[]string{"vm", "core"},
nil,
)
c.vmCPUWaitTimePerDispatch = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_cpu"), "wait_time_per_dispatch_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_cpu"), "wait_time_per_dispatch_total"),
"Time in nanoseconds waiting for a virtual processor to be dispatched onto a logical processor",
[]string{"vm", "core"},
nil,
@ -445,127 +440,127 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.broadcastPacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "broadcast_packets_received_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "broadcast_packets_received_total"),
"This represents the total number of broadcast packets received per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.broadcastPacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "broadcast_packets_sent_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "broadcast_packets_sent_total"),
"This represents the total number of broadcast packets sent per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.bytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "bytes_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "bytes_total"),
"This represents the total number of bytes per second traversing the virtual switch",
[]string{"vswitch"},
nil,
)
c.bytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "bytes_received_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "bytes_received_total"),
"This represents the total number of bytes received per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.bytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "bytes_sent_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "bytes_sent_total"),
"This represents the total number of bytes sent per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.directedPacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "directed_packets_received_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "directed_packets_received_total"),
"This represents the total number of directed packets received per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.directedPacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "directed_packets_send_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "directed_packets_send_total"),
"This represents the total number of directed packets sent per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.droppedPacketsIncoming = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "dropped_packets_incoming_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "dropped_packets_incoming_total"),
"This represents the total number of packet dropped per second by the virtual switch in the incoming direction",
[]string{"vswitch"},
nil,
)
c.droppedPacketsOutgoing = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "dropped_packets_outcoming_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "dropped_packets_outcoming_total"),
"This represents the total number of packet dropped per second by the virtual switch in the outgoing direction",
[]string{"vswitch"},
nil,
)
c.extensionsDroppedPacketsIncoming = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "extensions_dropped_packets_incoming_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "extensions_dropped_packets_incoming_total"),
"This represents the total number of packet dropped per second by the virtual switch extensions in the incoming direction",
[]string{"vswitch"},
nil,
)
c.extensionsDroppedPacketsOutgoing = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "extensions_dropped_packets_outcoming_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "extensions_dropped_packets_outcoming_total"),
"This represents the total number of packet dropped per second by the virtual switch extensions in the outgoing direction",
[]string{"vswitch"},
nil,
)
c.learnedMacAddresses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "learned_mac_addresses_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "learned_mac_addresses_total"),
"This counter represents the total number of learned MAC addresses of the virtual switch",
[]string{"vswitch"},
nil,
)
c.multicastPacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "multicast_packets_received_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "multicast_packets_received_total"),
"This represents the total number of multicast packets received per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.multicastPacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "multicast_packets_sent_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "multicast_packets_sent_total"),
"This represents the total number of multicast packets sent per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.numberOfSendChannelMoves = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "number_of_send_channel_moves_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "number_of_send_channel_moves_total"),
"This represents the total number of send channel moves per second on this virtual switch",
[]string{"vswitch"},
nil,
)
c.numberOfVMQMoves = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "number_of_vmq_moves_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "number_of_vmq_moves_total"),
"This represents the total number of VMQ moves per second on this virtual switch",
[]string{"vswitch"},
nil,
)
c.packetsFlooded = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "packets_flooded_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "packets_flooded_total"),
"This counter represents the total number of packets flooded by the virtual switch",
[]string{"vswitch"},
nil,
)
c.packets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "packets_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "packets_total"),
"This represents the total number of packets per second traversing the virtual switch",
[]string{"vswitch"},
nil,
)
c.packetsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "packets_received_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "packets_received_total"),
"This represents the total number of packets received per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.packetsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "packets_sent_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "packets_sent_total"),
"This represents the total number of packets send per second by the virtual switch",
[]string{"vswitch"},
nil,
)
c.purgedMacAddresses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vswitch"), "purged_mac_addresses_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vswitch"), "purged_mac_addresses_total"),
"This counter represents the total number of purged MAC addresses of the virtual switch",
[]string{"vswitch"},
nil,
@ -574,37 +569,37 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.adapterBytesDropped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "bytes_dropped"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "bytes_dropped"),
"Bytes Dropped is the number of bytes dropped on the network adapter",
[]string{"adapter"},
nil,
)
c.adapterBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "bytes_received"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "bytes_received"),
"Bytes received is the number of bytes received on the network adapter",
[]string{"adapter"},
nil,
)
c.adapterBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "bytes_sent"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "bytes_sent"),
"Bytes sent is the number of bytes sent over the network adapter",
[]string{"adapter"},
nil,
)
c.adapterFramesDropped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "frames_dropped"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "frames_dropped"),
"Frames Dropped is the number of frames dropped on the network adapter",
[]string{"adapter"},
nil,
)
c.adapterFramesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "frames_received"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "frames_received"),
"Frames received is the number of frames received on the network adapter",
[]string{"adapter"},
nil,
)
c.adapterFramesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("ethernet"), "frames_sent"),
prometheus.BuildFQName("windows", buildSubsystemName("ethernet"), "frames_sent"),
"Frames sent is the number of frames sent over the network adapter",
[]string{"adapter"},
nil,
@ -613,37 +608,37 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.vmStorageErrorCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "error_count"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "error_count"),
"This counter represents the total number of errors that have occurred on this virtual device",
[]string{"vm_device"},
nil,
)
c.vmStorageQueueLength = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "queue_length"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "queue_length"),
"This counter represents the current queue length on this virtual device",
[]string{"vm_device"},
nil,
)
c.vmStorageReadBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "bytes_read"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "bytes_read"),
"This counter represents the total number of bytes that have been read per second on this virtual device",
[]string{"vm_device"},
nil,
)
c.vmStorageReadOperations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "operations_read"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "operations_read"),
"This counter represents the number of read operations that have occurred per second on this virtual device",
[]string{"vm_device"},
nil,
)
c.vmStorageWriteBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "bytes_written"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "bytes_written"),
"This counter represents the total number of bytes that have been written per second on this virtual device",
[]string{"vm_device"},
nil,
)
c.vmStorageWriteOperations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_device"), "operations_written"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_device"), "operations_written"),
"This counter represents the number of write operations that have occurred per second on this virtual device",
[]string{"vm_device"},
nil,
@ -652,37 +647,37 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.vmStorageBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "bytes_received"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "bytes_received"),
"This counter represents the total number of bytes received per second by the network adapter",
[]string{"vm_interface"},
nil,
)
c.vmStorageBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "bytes_sent"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "bytes_sent"),
"This counter represents the total number of bytes sent per second by the network adapter",
[]string{"vm_interface"},
nil,
)
c.vmStorageDroppedPacketsIncoming = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "packets_incoming_dropped"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "packets_incoming_dropped"),
"This counter represents the total number of dropped packets per second in the incoming direction of the network adapter",
[]string{"vm_interface"},
nil,
)
c.vmStorageDroppedPacketsOutgoing = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "packets_outgoing_dropped"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "packets_outgoing_dropped"),
"This counter represents the total number of dropped packets per second in the outgoing direction of the network adapter",
[]string{"vm_interface"},
nil,
)
c.vmStoragePacketsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "packets_received"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "packets_received"),
"This counter represents the total number of packets received per second by the network adapter",
[]string{"vm_interface"},
nil,
)
c.vmStoragePacketsSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_interface"), "packets_sent"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_interface"), "packets_sent"),
"This counter represents the total number of packets sent per second by the network adapter",
[]string{"vm_interface"},
nil,
@ -691,61 +686,61 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
//
c.vmMemoryAddedMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "added_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "added_total"),
"This counter represents memory in MB added to the VM",
[]string{"vm"},
nil,
)
c.vmMemoryAveragePressure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "pressure_average"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "pressure_average"),
"This gauge represents the average pressure in the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryCurrentPressure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "pressure_current"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "pressure_current"),
"This gauge represents the current pressure in the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryGuestVisiblePhysicalMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "physical_guest_visible"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "physical_guest_visible"),
"'This gauge represents the amount of memory in MB visible to the VM guest.'",
[]string{"vm"},
nil,
)
c.vmMemoryMaximumPressure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "pressure_maximum"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "pressure_maximum"),
"This gauge represents the maximum pressure band in the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryMemoryAddOperations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "add_operations_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "add_operations_total"),
"This counter represents the number of operations adding memory to the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryMemoryRemoveOperations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "remove_operations_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "remove_operations_total"),
"This counter represents the number of operations removing memory from the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryMinimumPressure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "pressure_minimum"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "pressure_minimum"),
"This gauge represents the minimum pressure band in the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryPhysicalMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "physical"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "physical"),
"This gauge represents the current amount of memory in MB assigned to the VM.",
[]string{"vm"},
nil,
)
c.vmMemoryRemovedMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, buildSubsystemName("vm_memory"), "removed_total"),
prometheus.BuildFQName("windows", buildSubsystemName("vm_memory"), "removed_total"),
"This counter represents memory in MB removed from the VM",
[]string{"vm"},
nil,
@ -756,101 +751,52 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
if err := c.collectVmHealth(ch); err != nil {
logger.Error("failed collecting hyperV health status metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmVid(ch); err != nil {
logger.Error("failed collecting hyperV pages metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmHv(ch); err != nil {
logger.Error("failed collecting hyperV hv status metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmProcessor(ch); err != nil {
logger.Error("failed collecting hyperV processor metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectHostLPUsage(logger, ch); err != nil {
logger.Error("failed collecting hyperV host logical processors metrics",
slog.Any("err", err),
)
if err := c.collectHostLPUsage(nil, ch); err != nil {
return err
}
if err := c.collectHostCpuUsage(logger, ch); err != nil {
logger.Error("failed collecting hyperV host CPU metrics",
slog.Any("err", err),
)
if err := c.collectHostCpuUsage(nil, ch); err != nil {
return err
}
if err := c.collectVmCpuUsage(logger, ch); err != nil {
logger.Error("failed collecting hyperV VM CPU metrics",
slog.Any("err", err),
)
if err := c.collectVmCpuUsage(nil, ch); err != nil {
return err
}
if err := c.collectVmSwitch(ch); err != nil {
logger.Error("failed collecting hyperV switch metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmEthernet(ch); err != nil {
logger.Error("failed collecting hyperV ethernet metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmStorage(ch); err != nil {
logger.Error("failed collecting hyperV virtual storage metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmNetwork(ch); err != nil {
logger.Error("failed collecting hyperV virtual network metrics",
slog.Any("err", err),
)
return err
}
if err := c.collectVmMemory(ch); err != nil {
logger.Error("failed collecting hyperV virtual memory metrics",
slog.Any("err", err),
)
return err
}
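The Hyper-V descriptors switch from the shared types.Namespace constant to the literal "windows" prefix; the fully qualified metric names stay the same as long as the constant equals "windows", because BuildFQName simply joins namespace, subsystem, and name with underscores. A small illustration:
//go:build windows
package hyperv
import (
	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus/client_golang/prometheus"
)
// Illustration: assuming types.Namespace == "windows", both expressions yield
// the same fully qualified name, "windows_hyperv_health_critical".
var (
	fromConstant = prometheus.BuildFQName(types.Namespace, "hyperv_health", "critical")
	fromLiteral  = prometheus.BuildFQName("windows", "hyperv_health", "critical")
)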

View File

@ -1,10 +1,12 @@
//go:build windows
package hyperv_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/hyperv"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

File diff suppressed because it is too large

View File

@ -0,0 +1,265 @@
//go:build windows
package iis
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorAppPoolWAS struct {
perfDataCollectorAppPoolWAS *perfdata.Collector
currentApplicationPoolState *prometheus.Desc
currentApplicationPoolUptime *prometheus.Desc
currentWorkerProcesses *prometheus.Desc
maximumWorkerProcesses *prometheus.Desc
recentWorkerProcessFailures *prometheus.Desc
timeSinceLastWorkerProcessFailure *prometheus.Desc
totalApplicationPoolRecycles *prometheus.Desc
totalApplicationPoolUptime *prometheus.Desc
totalWorkerProcessesCreated *prometheus.Desc
totalWorkerProcessFailures *prometheus.Desc
totalWorkerProcessPingFailures *prometheus.Desc
totalWorkerProcessShutdownFailures *prometheus.Desc
totalWorkerProcessStartupFailures *prometheus.Desc
}
const (
CurrentApplicationPoolState = "Current Application Pool State"
CurrentApplicationPoolUptime = "Current Application Pool Uptime"
CurrentWorkerProcesses = "Current Worker Processes"
MaximumWorkerProcesses = "Maximum Worker Processes"
RecentWorkerProcessFailures = "Recent Worker Process Failures"
TimeSinceLastWorkerProcessFailure = "Time Since Last Worker Process Failure"
TotalApplicationPoolRecycles = "Total Application Pool Recycles"
TotalApplicationPoolUptime = "Total Application Pool Uptime"
TotalWorkerProcessesCreated = "Total Worker Processes Created"
TotalWorkerProcessFailures = "Total Worker Process Failures"
TotalWorkerProcessPingFailures = "Total Worker Process Ping Failures"
TotalWorkerProcessShutdownFailures = "Total Worker Process Shutdown Failures"
TotalWorkerProcessStartupFailures = "Total Worker Process Startup Failures"
)
var applicationStates = map[uint32]string{
1: "Uninitialized",
2: "Initialized",
3: "Running",
4: "Disabling",
5: "Disabled",
6: "Shutdown Pending",
7: "Delete Pending",
}
func (c *Collector) buildAppPoolWAS() error {
var err error
c.perfDataCollectorAppPoolWAS, err = perfdata.NewCollector("APP_POOL_WAS", perfdata.InstanceAll, []string{
CurrentApplicationPoolState,
CurrentApplicationPoolUptime,
CurrentWorkerProcesses,
MaximumWorkerProcesses,
RecentWorkerProcessFailures,
TimeSinceLastWorkerProcessFailure,
TotalApplicationPoolRecycles,
TotalApplicationPoolUptime,
TotalWorkerProcessesCreated,
TotalWorkerProcessFailures,
TotalWorkerProcessPingFailures,
TotalWorkerProcessShutdownFailures,
TotalWorkerProcessStartupFailures,
})
if err != nil {
return fmt.Errorf("failed to create APP_POOL_WAS collector: %w", err)
}
// APP_POOL_WAS
c.currentApplicationPoolState = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_application_pool_state"),
"The current status of the application pool (1 - Uninitialized, 2 - Initialized, 3 - Running, 4 - Disabling, 5 - Disabled, 6 - Shutdown Pending, 7 - Delete Pending) (CurrentApplicationPoolState)",
[]string{"app", "state"},
nil,
)
c.currentApplicationPoolUptime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_application_pool_start_time"),
"The unix timestamp for the application pool start time (CurrentApplicationPoolUptime)",
[]string{"app"},
nil,
)
c.currentWorkerProcesses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_worker_processes"),
"The current number of worker processes that are running in the application pool (CurrentWorkerProcesses)",
[]string{"app"},
nil,
)
c.maximumWorkerProcesses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "maximum_worker_processes"),
"The maximum number of worker processes that have been created for the application pool since Windows Process Activation Service (WAS) started (MaximumWorkerProcesses)",
[]string{"app"},
nil,
)
c.recentWorkerProcessFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "recent_worker_process_failures"),
"The number of times that worker processes for the application pool failed during the rapid-fail protection interval (RecentWorkerProcessFailures)",
[]string{"app"},
nil,
)
c.timeSinceLastWorkerProcessFailure = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "time_since_last_worker_process_failure"),
"The length of time, in seconds, since the last worker process failure occurred for the application pool (TimeSinceLastWorkerProcessFailure)",
[]string{"app"},
nil,
)
c.totalApplicationPoolRecycles = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_application_pool_recycles"),
"The number of times that the application pool has been recycled since Windows Process Activation Service (WAS) started (TotalApplicationPoolRecycles)",
[]string{"app"},
nil,
)
c.totalApplicationPoolUptime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_application_pool_start_time"),
"The unix timestamp for the application pool of when the Windows Process Activation Service (WAS) started (TotalApplicationPoolUptime)",
[]string{"app"},
nil,
)
c.totalWorkerProcessesCreated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_worker_processes_created"),
"The number of worker processes created for the application pool since Windows Process Activation Service (WAS) started (TotalWorkerProcessesCreated)",
[]string{"app"},
nil,
)
c.totalWorkerProcessFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_worker_process_failures"),
"The number of times that worker processes have crashed since the application pool was started (TotalWorkerProcessFailures)",
[]string{"app"},
nil,
)
c.totalWorkerProcessPingFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_worker_process_ping_failures"),
"The number of times that Windows Process Activation Service (WAS) did not receive a response to ping messages sent to a worker process (TotalWorkerProcessPingFailures)",
[]string{"app"},
nil,
)
c.totalWorkerProcessShutdownFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_worker_process_shutdown_failures"),
"The number of times that Windows Process Activation Service (WAS) failed to shut down a worker process (TotalWorkerProcessShutdownFailures)",
[]string{"app"},
nil,
)
c.totalWorkerProcessStartupFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "total_worker_process_startup_failures"),
"The number of times that Windows Process Activation Service (WAS) failed to start a worker process (TotalWorkerProcessStartupFailures)",
[]string{"app"},
nil,
)
return nil
}
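
The builder above only declares which APP_POOL_WAS counters to sample; the collect functions then index the result by instance name and counter name and read FirstValue. A minimal, self-contained sketch of that shape, using hypothetical values and a stand-in type rather than the internal perfdata API:

package main

import "fmt"

// counterValue loosely mimics the value type consumed by the collectors above;
// only FirstValue is read in the IIS code (illustrative stand-in, not the real API).
type counterValue struct{ FirstValue float64 }

func main() {
    // Hypothetical shape of a Collect() result: instance name -> counter name -> value.
    perfData := map[string]map[string]counterValue{
        "DefaultAppPool": {
            "Current Worker Processes":       {FirstValue: 1},
            "Total Worker Processes Created": {FirstValue: 4},
        },
    }

    for name, counters := range perfData {
        fmt.Println(name, counters["Current Worker Processes"].FirstValue)
    }
}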
func (c *Collector) collectAppPoolWAS(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorAppPoolWAS.Collect()
if err != nil {
return fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err)
}
deduplicateIISNames(perfData)
for name, app := range perfData {
if c.config.AppExclude.MatchString(name) || !c.config.AppInclude.MatchString(name) {
continue
}
for key, label := range applicationStates {
isCurrentState := 0.0
if key == uint32(app[CurrentApplicationPoolState].FirstValue) {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.currentApplicationPoolState,
prometheus.GaugeValue,
isCurrentState,
name,
label,
)
}
ch <- prometheus.MustNewConstMetric(
c.currentApplicationPoolUptime,
prometheus.GaugeValue,
app[CurrentApplicationPoolUptime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentWorkerProcesses,
prometheus.GaugeValue,
app[CurrentWorkerProcesses].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.maximumWorkerProcesses,
prometheus.GaugeValue,
app[MaximumWorkerProcesses].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.recentWorkerProcessFailures,
prometheus.GaugeValue,
app[RecentWorkerProcessFailures].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.timeSinceLastWorkerProcessFailure,
prometheus.GaugeValue,
app[TimeSinceLastWorkerProcessFailure].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolRecycles,
prometheus.CounterValue,
app[TotalApplicationPoolRecycles].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolUptime,
prometheus.CounterValue,
app[TotalApplicationPoolUptime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessesCreated,
prometheus.CounterValue,
app[TotalWorkerProcessesCreated].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessFailures,
prometheus.CounterValue,
app[TotalWorkerProcessFailures].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessPingFailures,
prometheus.CounterValue,
app[TotalWorkerProcessPingFailures].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessShutdownFailures,
prometheus.CounterValue,
app[TotalWorkerProcessShutdownFailures].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessStartupFailures,
prometheus.CounterValue,
app[TotalWorkerProcessStartupFailures].FirstValue,
name,
)
}
return nil
}
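
The current_application_pool_state metric above is emitted one-hot: one sample per known state, carrying 1 only for the state the counter currently reports. A small, runnable sketch of that encoding with a hypothetical pool name and state value:

package main

import "fmt"

func main() {
    applicationStates := map[uint32]string{
        1: "Uninitialized", 2: "Initialized", 3: "Running", 4: "Disabling",
        5: "Disabled", 6: "Shutdown Pending", 7: "Delete Pending",
    }

    // Hypothetical reading of the "Current Application Pool State" counter.
    currentState := uint32(3)

    // One series per known state; exactly one of them carries the value 1.
    for key, label := range applicationStates {
        value := 0.0
        if key == currentState {
            value = 1.0
        }
        fmt.Printf("windows_iis_current_application_pool_state{app=%q,state=%q} %v\n",
            "DefaultAppPool", label, value)
    }
}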


@ -1,10 +1,12 @@
//go:build windows
package iis_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/iis"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {


@ -1,47 +0,0 @@
package iis
import (
"reflect"
"testing"
)
func TestIISDeduplication(t *testing.T) {
t.Parallel()
start := []perflibAPP_POOL_WAS{
{
Name: "foo",
Frequency_Object: 1,
},
{
Name: "foo1#999",
Frequency_Object: 2,
},
{
Name: "foo#2",
Frequency_Object: 3,
},
{
Name: "bar$2",
Frequency_Object: 4,
},
{
Name: "bar_2",
Frequency_Object: 5,
},
}
expected := make(map[string]perflibAPP_POOL_WAS)
// Should be deduplicated from "foo#2"
expected["foo"] = perflibAPP_POOL_WAS{Name: "foo#2", Frequency_Object: 3}
// Map key should have suffix stripped, but struct name field should be unchanged
expected["foo1"] = perflibAPP_POOL_WAS{Name: "foo1#999", Frequency_Object: 2}
// Map key and Name should be identical, as there is no suffix starting with "#"
expected["bar$2"] = perflibAPP_POOL_WAS{Name: "bar$2", Frequency_Object: 4}
// Map key and Name should be identical, as there is no suffix starting with "#"
expected["bar_2"] = perflibAPP_POOL_WAS{Name: "bar_2", Frequency_Object: 5}
deduplicated := dedupIISNames(start)
if !reflect.DeepEqual(expected, deduplicated) {
t.Errorf("Flattened values do not match!\nExpected result: %+v\nActual result: %+v", expected, deduplicated)
}
}


@ -0,0 +1,745 @@
//go:build windows
package iis
import (
"fmt"
"regexp"
"strings"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorW3SVCW3WP struct {
perfDataCollectorW3SVCW3WP *perfdata.Collector
// W3SVC_W3WP
threads *prometheus.Desc
maximumThreads *prometheus.Desc
requestsTotal *prometheus.Desc
requestsActive *prometheus.Desc
activeFlushedEntries *prometheus.Desc
currentFileCacheMemoryUsage *prometheus.Desc
maximumFileCacheMemoryUsage *prometheus.Desc
fileCacheFlushesTotal *prometheus.Desc
fileCacheQueriesTotal *prometheus.Desc
fileCacheHitsTotal *prometheus.Desc
filesCached *prometheus.Desc
filesCachedTotal *prometheus.Desc
filesFlushedTotal *prometheus.Desc
uriCacheFlushesTotal *prometheus.Desc
uriCacheQueriesTotal *prometheus.Desc
uriCacheHitsTotal *prometheus.Desc
urisCached *prometheus.Desc
urisCachedTotal *prometheus.Desc
urisFlushedTotal *prometheus.Desc
metadataCached *prometheus.Desc
metadataCacheFlushes *prometheus.Desc
metadataCacheQueriesTotal *prometheus.Desc
metadataCacheHitsTotal *prometheus.Desc
metadataCachedTotal *prometheus.Desc
metadataFlushedTotal *prometheus.Desc
outputCacheActiveFlushedItems *prometheus.Desc
outputCacheItems *prometheus.Desc
outputCacheMemoryUsage *prometheus.Desc
outputCacheQueriesTotal *prometheus.Desc
outputCacheHitsTotal *prometheus.Desc
outputCacheFlushedItemsTotal *prometheus.Desc
outputCacheFlushesTotal *prometheus.Desc
// IIS 8+ Only
requestErrorsTotal *prometheus.Desc
webSocketRequestsActive *prometheus.Desc
webSocketConnectionAttempts *prometheus.Desc
webSocketConnectionsAccepted *prometheus.Desc
webSocketConnectionsRejected *prometheus.Desc
}
var workerProcessNameExtractor = regexp.MustCompile(`^(\d+)_(.+)$`)
const (
Threads = "Active Threads Count"
MaximumThreads = "Maximum Threads Count"
RequestsTotal = "Total HTTP Requests Served"
RequestsActive = "Active Requests"
ActiveFlushedEntries = "Active Flushed Entries"
CurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
MaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
FileCacheFlushesTotal = "File Cache Flushes"
FileCacheHitsTotal = "File Cache Hits"
FileCacheMissesTotal = "File Cache Misses"
FilesCached = "Current Files Cached"
FilesCachedTotal = "Total Files Cached"
FilesFlushedTotal = "Total Flushed Files"
URICacheFlushesTotal = "Total Flushed URIs"
URICacheFlushesTotalKernel = "Total Flushed URIs"
URIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
URICacheHitsTotal = "URI Cache Hits"
URICacheHitsTotalKernel = "Kernel: URI Cache Hits"
URICacheMissesTotal = "URI Cache Misses"
URICacheMissesTotalKernel = "Kernel: URI Cache Misses"
URIsCached = "Current URIs Cached"
URIsCachedKernel = "Kernel: Current URIs Cached"
URIsCachedTotal = "Total URIs Cached"
URIsCachedTotalKernel = "Total URIs Cached"
URIsFlushedTotal = "Total Flushed URIs"
MetaDataCacheHits = "Metadata Cache Hits"
MetaDataCacheMisses = "Metadata Cache Misses"
MetadataCached = "Current Metadata Cached"
MetadataCacheFlushes = "Metadata Cache Flushes"
MetadataCachedTotal = "Total Metadata Cached"
MetadataFlushedTotal = "Total Flushed Metadata"
OutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
OutputCacheItems = "Output Cache Current Items"
OutputCacheMemoryUsage = "Output Cache Current Memory Usage"
OutputCacheHitsTotal = "Output Cache Total Hits"
OutputCacheMissesTotal = "Output Cache Total Misses"
OutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
OutputCacheFlushesTotal = "Output Cache Total Flushes"
// IIS8
RequestErrors500 = "% 500 HTTP Response Sent"
RequestErrors503 = "% 503 HTTP Response Sent"
RequestErrors404 = "% 404 HTTP Response Sent"
RequestErrors403 = "% 403 HTTP Response Sent"
RequestErrors401 = "% 401 HTTP Response Sent"
WebSocketRequestsActive = "WebSocket Active Requests"
WebSocketConnectionAttempts = "WebSocket Connection Attempts / Sec"
WebSocketConnectionsAccepted = "WebSocket Connections Accepted / Sec"
WebSocketConnectionsRejected = "WebSocket Connections Rejected / Sec"
)
func (c *Collector) buildW3SVCW3WP() error {
counters := []string{
Threads,
MaximumThreads,
RequestsTotal,
RequestsActive,
ActiveFlushedEntries,
CurrentFileCacheMemoryUsage,
MaximumFileCacheMemoryUsage,
FileCacheFlushesTotal,
FileCacheHitsTotal,
FileCacheMissesTotal,
FilesCached,
FilesCachedTotal,
FilesFlushedTotal,
URICacheFlushesTotal,
URICacheFlushesTotalKernel,
URIsFlushedTotalKernel,
URICacheHitsTotal,
URICacheHitsTotalKernel,
URICacheMissesTotal,
URICacheMissesTotalKernel,
URIsCached,
URIsCachedKernel,
URIsCachedTotal,
URIsCachedTotalKernel,
URIsFlushedTotal,
MetaDataCacheHits,
MetaDataCacheMisses,
MetadataCached,
MetadataCacheFlushes,
MetadataCachedTotal,
MetadataFlushedTotal,
OutputCacheActiveFlushedItems,
OutputCacheItems,
OutputCacheMemoryUsage,
OutputCacheHitsTotal,
OutputCacheMissesTotal,
OutputCacheFlushedItemsTotal,
OutputCacheFlushesTotal,
}
if c.iisVersion.major >= 8 {
counters = append(counters, []string{
RequestErrors500,
RequestErrors503,
RequestErrors404,
RequestErrors403,
RequestErrors401,
WebSocketRequestsActive,
WebSocketConnectionAttempts,
WebSocketConnectionsAccepted,
WebSocketConnectionsRejected,
}...)
}
var err error
c.perfDataCollectorW3SVCW3WP, err = perfdata.NewCollector("W3SVC_W3WP", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
}
// W3SVC_W3WP
c.threads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_threads"),
"Number of threads actively processing requests in the worker process",
[]string{"app", "pid", "state"},
nil,
)
c.maximumThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_max_threads"),
"Maximum number of threads to which the thread pool can grow as needed",
[]string{"app", "pid"},
nil,
)
c.requestsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_requests_total"),
"Total number of HTTP requests served by the worker process",
[]string{"app", "pid"},
nil,
)
c.requestsActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_current_requests"),
"Current number of requests being processed by the worker process",
[]string{"app", "pid"},
nil,
)
c.activeFlushedEntries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_cache_active_flushed_entries"),
"Number of file handles cached in user-mode that will be closed when all current transfers complete.",
[]string{"app", "pid"},
nil,
)
c.currentFileCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_memory_bytes"),
"Current number of bytes used by user-mode file cache",
[]string{"app", "pid"},
nil,
)
c.maximumFileCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_max_memory_bytes"),
"Maximum number of bytes used by user-mode file cache",
[]string{"app", "pid"},
nil,
)
c.fileCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_flushes_total"),
"Total number of files removed from the user-mode cache",
[]string{"app", "pid"},
nil,
)
c.fileCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_queries_total"),
"Total file cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.fileCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_hits_total"),
"Total number of successful lookups in the user-mode file cache",
[]string{"app", "pid"},
nil,
)
c.filesCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items"),
"Current number of files whose contents are present in user-mode cache",
[]string{"app", "pid"},
nil,
)
c.filesCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items_total"),
"Total number of files whose contents were ever added to the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.filesFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_file_cache_items_flushed_total"),
"Total number of file handles that have been removed from the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.uriCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_flushes_total"),
"Total number of URI cache flushes (since service startup)",
[]string{"app", "pid"},
nil,
)
c.uriCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_queries_total"),
"Total number of uri cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.uriCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_hits_total"),
"Total number of successful lookups in the user-mode URI cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.urisCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items"),
"Number of URI information blocks currently in the user-mode cache",
[]string{"app", "pid"},
nil,
)
c.urisCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items_total"),
"Total number of URI information blocks added to the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.urisFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_uri_cache_items_flushed_total"),
"The number of URI information blocks that have been removed from the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items"),
"Number of metadata information blocks currently present in user-mode cache",
[]string{"app", "pid"},
nil,
)
c.metadataCacheFlushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_flushes_total"),
"Total number of user-mode metadata cache flushes (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_queries_total"),
"Total metadata cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.metadataCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_hits_total"),
"Total number of successful lookups in the user-mode metadata cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items_cached_total"),
"Total number of metadata information blocks added to the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.metadataFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_metadata_cache_items_flushed_total"),
"Total number of metadata information blocks removed from the user-mode cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheActiveFlushedItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_active_flushed_items"),
"",
[]string{"app", "pid"},
nil,
)
c.outputCacheItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_items"),
"Number of items current present in output cache",
[]string{"app", "pid"},
nil,
)
c.outputCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_memory_bytes"),
"Current number of bytes used by output cache",
[]string{"app", "pid"},
nil,
)
c.outputCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_queries_total"),
"Total number of output cache queries (hits + misses)",
[]string{"app", "pid"},
nil,
)
c.outputCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_hits_total"),
"Total number of successful lookups in output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheFlushedItemsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_items_flushed_total"),
"Total number of items flushed from output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
c.outputCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_output_cache_flushes_total"),
"Total number of flushes of output cache (since service startup)",
[]string{"app", "pid"},
nil,
)
// W3SVC_W3WP_IIS8
c.requestErrorsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_request_errors_total"),
"Total number of requests that returned an error",
[]string{"app", "pid", "status_code"},
nil,
)
c.webSocketRequestsActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_current_websocket_requests"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_attempts_total"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionsAccepted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_accepted_total"),
"",
[]string{"app", "pid"},
nil,
)
c.webSocketConnectionsRejected = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "worker_websocket_connection_rejected_total"),
"",
[]string{"app", "pid"},
nil,
)
return nil
}
func (c *Collector) collectW3SVCW3WP(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorW3SVCW3WP.Collect()
if err != nil {
return fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err)
}
deduplicateIISNames(perfData)
for name, app := range perfData {
if c.config.AppExclude.MatchString(name) || !c.config.AppInclude.MatchString(name) {
continue
}
// Extract the apppool name from the format <PID>_<NAME>
pid := workerProcessNameExtractor.ReplaceAllString(name, "$1")
name := workerProcessNameExtractor.ReplaceAllString(name, "$2")
if name == "" || name == "_Total" ||
c.config.AppExclude.MatchString(name) ||
!c.config.AppInclude.MatchString(name) {
continue
}
// Duplicate instances are suffixed # with an index number. These should be ignored
if strings.Contains(name, "#") {
continue
}
ch <- prometheus.MustNewConstMetric(
c.threads,
prometheus.GaugeValue,
app[Threads].FirstValue,
name,
pid,
"busy",
)
ch <- prometheus.MustNewConstMetric(
c.maximumThreads,
prometheus.CounterValue,
app[MaximumThreads].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.requestsTotal,
prometheus.CounterValue,
app[RequestsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.requestsActive,
prometheus.CounterValue,
app[RequestsActive].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.activeFlushedEntries,
prometheus.GaugeValue,
app[ActiveFlushedEntries].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.currentFileCacheMemoryUsage,
prometheus.GaugeValue,
app[CurrentFileCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.maximumFileCacheMemoryUsage,
prometheus.CounterValue,
app[MaximumFileCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheFlushesTotal,
prometheus.CounterValue,
app[FileCacheFlushesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheQueriesTotal,
prometheus.CounterValue,
app[FileCacheHitsTotal].FirstValue+app[FileCacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.fileCacheHitsTotal,
prometheus.CounterValue,
app[FileCacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesCached,
prometheus.GaugeValue,
app[FilesCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesCachedTotal,
prometheus.CounterValue,
app[FilesCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.filesFlushedTotal,
prometheus.CounterValue,
app[FilesFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheFlushesTotal,
prometheus.CounterValue,
app[URICacheFlushesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheQueriesTotal,
prometheus.CounterValue,
app[URICacheHitsTotal].FirstValue+app[URICacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.uriCacheHitsTotal,
prometheus.CounterValue,
app[URICacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisCached,
prometheus.GaugeValue,
app[URIsCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisCachedTotal,
prometheus.CounterValue,
app[URIsCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.urisFlushedTotal,
prometheus.CounterValue,
app[URIsFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCached,
prometheus.GaugeValue,
app[MetadataCached].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheFlushes,
prometheus.CounterValue,
app[MetadataCacheFlushes].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheQueriesTotal,
prometheus.CounterValue,
app[MetaDataCacheHits].FirstValue+app[MetaDataCacheMisses].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCacheHitsTotal,
prometheus.CounterValue,
app[MetaDataCacheHits].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataCachedTotal,
prometheus.CounterValue,
app[MetadataCachedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.metadataFlushedTotal,
prometheus.CounterValue,
app[MetadataFlushedTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheActiveFlushedItems,
prometheus.CounterValue,
app[OutputCacheActiveFlushedItems].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheItems,
prometheus.CounterValue,
app[OutputCacheItems].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheMemoryUsage,
prometheus.CounterValue,
app[OutputCacheMemoryUsage].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheQueriesTotal,
prometheus.CounterValue,
app[OutputCacheHitsTotal].FirstValue+app[OutputCacheMissesTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheHitsTotal,
prometheus.CounterValue,
app[OutputCacheHitsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheFlushedItemsTotal,
prometheus.CounterValue,
app[OutputCacheFlushedItemsTotal].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.outputCacheFlushesTotal,
prometheus.CounterValue,
app[OutputCacheFlushesTotal].FirstValue,
name,
pid,
)
if c.iisVersion.major >= 8 {
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors401].FirstValue,
name,
pid,
"401",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors403].FirstValue,
name,
pid,
"403",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors404].FirstValue,
name,
pid,
"404",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors500].FirstValue,
name,
pid,
"500",
)
ch <- prometheus.MustNewConstMetric(
c.requestErrorsTotal,
prometheus.CounterValue,
app[RequestErrors503].FirstValue,
name,
pid,
"503",
)
ch <- prometheus.MustNewConstMetric(
c.webSocketRequestsActive,
prometheus.CounterValue,
app[WebSocketRequestsActive].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionAttempts,
prometheus.CounterValue,
app[WebSocketConnectionAttempts].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionsAccepted,
prometheus.CounterValue,
app[WebSocketConnectionsAccepted].FirstValue,
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.webSocketConnectionsRejected,
prometheus.CounterValue,
app[WebSocketConnectionsRejected].FirstValue,
name,
pid,
)
}
}
return nil
}
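
The worker metrics above depend on the W3SVC_W3WP instance naming convention <PID>_<app pool name>; the regular expression splits an instance into the pid and app labels. A short, self-contained sketch with a hypothetical instance name:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern as workerProcessNameExtractor above.
    extractor := regexp.MustCompile(`^(\d+)_(.+)$`)

    instance := "4711_DefaultAppPool" // hypothetical instance name
    pid := extractor.ReplaceAllString(instance, "$1")
    app := extractor.ReplaceAllString(instance, "$2")

    fmt.Println(pid, app) // 4711 DefaultAppPool
}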


@ -0,0 +1,516 @@
//go:build windows
package iis
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWebService struct {
perfDataCollectorWebService *perfdata.Collector
currentAnonymousUsers *prometheus.Desc
currentBlockedAsyncIORequests *prometheus.Desc
currentCGIRequests *prometheus.Desc
currentConnections *prometheus.Desc
currentISAPIExtensionRequests *prometheus.Desc
currentNonAnonymousUsers *prometheus.Desc
serviceUptime *prometheus.Desc
totalBytesReceived *prometheus.Desc
totalBytesSent *prometheus.Desc
totalAnonymousUsers *prometheus.Desc
totalBlockedAsyncIORequests *prometheus.Desc
totalCGIRequests *prometheus.Desc
totalConnectionAttemptsAllInstances *prometheus.Desc
totalRequests *prometheus.Desc
totalFilesReceived *prometheus.Desc
totalFilesSent *prometheus.Desc
totalISAPIExtensionRequests *prometheus.Desc
totalLockedErrors *prometheus.Desc
totalLogonAttempts *prometheus.Desc
totalNonAnonymousUsers *prometheus.Desc
totalNotFoundErrors *prometheus.Desc
totalRejectedAsyncIORequests *prometheus.Desc
}
const (
CurrentAnonymousUsers = "Current Anonymous Users"
CurrentBlockedAsyncIORequests = "Current Blocked Async I/O Requests"
CurrentCGIRequests = "Current CGI Requests"
CurrentConnections = "Current Connections"
CurrentISAPIExtensionRequests = "Current ISAPI Extension Requests"
CurrentNonAnonymousUsers = "Current NonAnonymous Users"
ServiceUptime = "Service Uptime"
TotalBytesReceived = "Total Bytes Received"
TotalBytesSent = "Total Bytes Sent"
TotalAnonymousUsers = "Total Anonymous Users"
TotalBlockedAsyncIORequests = "Total Blocked Async I/O Requests"
TotalCGIRequests = "Total CGI Requests"
TotalConnectionAttemptsAllInstances = "Total Connection Attempts (all instances)"
TotalFilesReceived = "Total Files Received"
TotalFilesSent = "Total Files Sent"
TotalISAPIExtensionRequests = "Total ISAPI Extension Requests"
TotalLockedErrors = "Total Locked Errors"
TotalLogonAttempts = "Total Logon Attempts"
TotalNonAnonymousUsers = "Total NonAnonymous Users"
TotalNotFoundErrors = "Total Not Found Errors"
TotalRejectedAsyncIORequests = "Total Rejected Async I/O Requests"
TotalCopyRequests = "Total Copy Requests"
TotalDeleteRequests = "Total Delete Requests"
TotalGetRequests = "Total Get Requests"
TotalHeadRequests = "Total Head Requests"
TotalLockRequests = "Total Lock Requests"
TotalMkcolRequests = "Total Mkcol Requests"
TotalMoveRequests = "Total Move Requests"
TotalOptionsRequests = "Total Options Requests"
TotalOtherRequests = "Total Other Request Methods"
TotalPostRequests = "Total Post Requests"
TotalPropfindRequests = "Total Propfind Requests"
TotalProppatchRequests = "Total Proppatch Requests"
TotalPutRequests = "Total Put Requests"
TotalSearchRequests = "Total Search Requests"
TotalTraceRequests = "Total Trace Requests"
TotalUnlockRequests = "Total Unlock Requests"
)
func (c *Collector) buildWebService() error {
var err error
c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service", perfdata.InstanceAll, []string{
CurrentAnonymousUsers,
CurrentBlockedAsyncIORequests,
CurrentCGIRequests,
CurrentConnections,
CurrentISAPIExtensionRequests,
CurrentNonAnonymousUsers,
ServiceUptime,
TotalBytesReceived,
TotalBytesSent,
TotalAnonymousUsers,
TotalBlockedAsyncIORequests,
TotalCGIRequests,
TotalConnectionAttemptsAllInstances,
TotalFilesReceived,
TotalFilesSent,
TotalISAPIExtensionRequests,
TotalLockedErrors,
TotalLogonAttempts,
TotalNonAnonymousUsers,
TotalNotFoundErrors,
TotalRejectedAsyncIORequests,
TotalCopyRequests,
TotalDeleteRequests,
TotalGetRequests,
TotalHeadRequests,
TotalLockRequests,
TotalMkcolRequests,
TotalMoveRequests,
TotalOptionsRequests,
TotalOtherRequests,
TotalPostRequests,
TotalPropfindRequests,
TotalProppatchRequests,
TotalPutRequests,
TotalSearchRequests,
TotalTraceRequests,
TotalUnlockRequests,
})
if err != nil {
return fmt.Errorf("failed to create Web Service collector: %w", err)
}
c.currentAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_anonymous_users"),
"Number of users who currently have an anonymous connection using the Web service (WebService.CurrentAnonymousUsers)",
[]string{"site"},
nil,
)
c.currentBlockedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_blocked_async_io_requests"),
"Current requests temporarily blocked due to bandwidth throttling settings (WebService.CurrentBlockedAsyncIORequests)",
[]string{"site"},
nil,
)
c.currentCGIRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_cgi_requests"),
"Current number of CGI requests being simultaneously processed by the Web service (WebService.CurrentCGIRequests)",
[]string{"site"},
nil,
)
c.currentConnections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_connections"),
"Current number of connections established with the Web service (WebService.CurrentConnections)",
[]string{"site"},
nil,
)
c.currentISAPIExtensionRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_isapi_extension_requests"),
"Current number of ISAPI requests being simultaneously processed by the Web service (WebService.CurrentISAPIExtensionRequests)",
[]string{"site"},
nil,
)
c.currentNonAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "current_non_anonymous_users"),
"Number of users who currently have a non-anonymous connection using the Web service (WebService.CurrentNonAnonymousUsers)",
[]string{"site"},
nil,
)
c.serviceUptime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "service_uptime"),
"Number of seconds the WebService is up (WebService.ServiceUptime)",
[]string{"site"},
nil,
)
c.totalBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "received_bytes_total"),
"Number of data bytes that have been received by the Web service (WebService.TotalBytesReceived)",
[]string{"site"},
nil,
)
c.totalBytesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sent_bytes_total"),
"Number of data bytes that have been sent by the Web service (WebService.TotalBytesSent)",
[]string{"site"},
nil,
)
c.totalAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "anonymous_users_total"),
"Total number of users who established an anonymous connection with the Web service (WebService.TotalAnonymousUsers)",
[]string{"site"},
nil,
)
c.totalBlockedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "blocked_async_io_requests_total"),
"Total requests temporarily blocked due to bandwidth throttling settings (WebService.TotalBlockedAsyncIORequests)",
[]string{"site"},
nil,
)
c.totalCGIRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "cgi_requests_total"),
"Total CGI requests is the total number of CGI requests (WebService.TotalCGIRequests)",
[]string{"site"},
nil,
)
c.totalConnectionAttemptsAllInstances = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "connection_attempts_all_instances_total"),
"Number of connections that have been attempted using the Web service (WebService.TotalConnectionAttemptsAllInstances)",
[]string{"site"},
nil,
)
c.totalRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Number of HTTP requests (WebService.TotalRequests)",
[]string{"site", "method"},
nil,
)
c.totalFilesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "files_received_total"),
"Number of files received by the Web service (WebService.TotalFilesReceived)",
[]string{"site"},
nil,
)
c.totalFilesSent = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "files_sent_total"),
"Number of files sent by the Web service (WebService.TotalFilesSent)",
[]string{"site"},
nil,
)
c.totalISAPIExtensionRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ipapi_extension_requests_total"),
"ISAPI Extension Requests received (WebService.TotalISAPIExtensionRequests)",
[]string{"site"},
nil,
)
c.totalLockedErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locked_errors_total"),
"Number of requests that couldn't be satisfied by the server because the requested resource was locked (WebService.TotalLockedErrors)",
[]string{"site"},
nil,
)
c.totalLogonAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logon_attempts_total"),
"Number of logons attempts to the Web Service (WebService.TotalLogonAttempts)",
[]string{"site"},
nil,
)
c.totalNonAnonymousUsers = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "non_anonymous_users_total"),
"Number of users who established a non-anonymous connection with the Web service (WebService.TotalNonAnonymousUsers)",
[]string{"site"},
nil,
)
c.totalNotFoundErrors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "not_found_errors_total"),
"Number of requests that couldn't be satisfied by the server because the requested document could not be found (WebService.TotalNotFoundErrors)",
[]string{"site"},
nil,
)
c.totalRejectedAsyncIORequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "rejected_async_io_requests_total"),
"Requests rejected due to bandwidth throttling settings (WebService.TotalRejectedAsyncIORequests)",
[]string{"site"},
nil,
)
return nil
}
func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorWebService.Collect()
if err != nil {
return fmt.Errorf("failed to collect Web Service metrics: %w", err)
}
deduplicateIISNames(perfData)
for name, app := range perfData {
if c.config.SiteExclude.MatchString(name) || !c.config.SiteInclude.MatchString(name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.currentAnonymousUsers,
prometheus.GaugeValue,
app[CurrentAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentBlockedAsyncIORequests,
prometheus.GaugeValue,
app[CurrentBlockedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentCGIRequests,
prometheus.GaugeValue,
app[CurrentCGIRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentConnections,
prometheus.GaugeValue,
app[CurrentConnections].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentISAPIExtensionRequests,
prometheus.GaugeValue,
app[CurrentISAPIExtensionRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.currentNonAnonymousUsers,
prometheus.GaugeValue,
app[CurrentNonAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.serviceUptime,
prometheus.GaugeValue,
app[ServiceUptime].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBytesReceived,
prometheus.CounterValue,
app[TotalBytesReceived].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBytesSent,
prometheus.CounterValue,
app[TotalBytesSent].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalAnonymousUsers,
prometheus.CounterValue,
app[TotalAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalBlockedAsyncIORequests,
prometheus.CounterValue,
app[TotalBlockedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalCGIRequests,
prometheus.CounterValue,
app[TotalCGIRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalConnectionAttemptsAllInstances,
prometheus.CounterValue,
app[TotalConnectionAttemptsAllInstances].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalFilesReceived,
prometheus.CounterValue,
app[TotalFilesReceived].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalFilesSent,
prometheus.CounterValue,
app[TotalFilesSent].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalISAPIExtensionRequests,
prometheus.CounterValue,
app[TotalISAPIExtensionRequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalLockedErrors,
prometheus.CounterValue,
app[TotalLockedErrors].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalLogonAttempts,
prometheus.CounterValue,
app[TotalLogonAttempts].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalNonAnonymousUsers,
prometheus.CounterValue,
app[TotalNonAnonymousUsers].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalNotFoundErrors,
prometheus.CounterValue,
app[TotalNotFoundErrors].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalRejectedAsyncIORequests,
prometheus.CounterValue,
app[TotalRejectedAsyncIORequests].FirstValue,
name,
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalOtherRequests].FirstValue,
name,
"other",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalCopyRequests].FirstValue,
name,
"COPY",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalDeleteRequests].FirstValue,
name,
"DELETE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalGetRequests].FirstValue,
name,
"GET",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalHeadRequests].FirstValue,
name,
"HEAD",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalLockRequests].FirstValue,
name,
"LOCK",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalMkcolRequests].FirstValue,
name,
"MKCOL",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalMoveRequests].FirstValue,
name,
"MOVE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalOptionsRequests].FirstValue,
name,
"OPTIONS",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalPostRequests].FirstValue,
name,
"POST",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalPropfindRequests].FirstValue,
name,
"PROPFIND",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalProppatchRequests].FirstValue,
name,
"PROPPATCH",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalPutRequests].FirstValue,
name,
"PUT",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalSearchRequests].FirstValue,
name,
"SEARCH",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalTraceRequests].FirstValue,
name,
"TRACE",
)
ch <- prometheus.MustNewConstMetric(
c.totalRequests,
prometheus.CounterValue,
app[TotalUnlockRequests].FirstValue,
name,
"UNLOCK",
)
}
return nil
}
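
Rather than exposing one metric per HTTP verb, the per-method totals above all feed a single requests_total family distinguished by a method label (plus the site). A small sketch with hypothetical values:

package main

import "fmt"

func main() {
    // Hypothetical per-method totals as read from the "Web Service" object for one site.
    totals := map[string]float64{
        "GET":   1200,
        "POST":  340,
        "HEAD":  15,
        "other": 7,
    }

    for method, value := range totals {
        fmt.Printf("windows_iis_requests_total{site=%q,method=%q} %v\n",
            "Default Web Site", method, value)
    }
}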


@ -0,0 +1,501 @@
//go:build windows
package iis
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWebServiceCache struct {
perfDataCollectorWebServiceCache *perfdata.Collector
serviceCacheActiveFlushedEntries *prometheus.Desc
serviceCacheCurrentFileCacheMemoryUsage *prometheus.Desc
serviceCacheMaximumFileCacheMemoryUsage *prometheus.Desc
serviceCacheFileCacheFlushesTotal *prometheus.Desc
serviceCacheFileCacheQueriesTotal *prometheus.Desc
serviceCacheFileCacheHitsTotal *prometheus.Desc
serviceCacheFilesCached *prometheus.Desc
serviceCacheFilesCachedTotal *prometheus.Desc
serviceCacheFilesFlushedTotal *prometheus.Desc
serviceCacheURICacheFlushesTotal *prometheus.Desc
serviceCacheURICacheQueriesTotal *prometheus.Desc
serviceCacheURICacheHitsTotal *prometheus.Desc
serviceCacheURIsCached *prometheus.Desc
serviceCacheURIsCachedTotal *prometheus.Desc
serviceCacheURIsFlushedTotal *prometheus.Desc
serviceCacheMetadataCached *prometheus.Desc
serviceCacheMetadataCacheFlushes *prometheus.Desc
serviceCacheMetadataCacheQueriesTotal *prometheus.Desc
serviceCacheMetadataCacheHitsTotal *prometheus.Desc
serviceCacheMetadataCachedTotal *prometheus.Desc
serviceCacheMetadataFlushedTotal *prometheus.Desc
serviceCacheOutputCacheActiveFlushedItems *prometheus.Desc
serviceCacheOutputCacheItems *prometheus.Desc
serviceCacheOutputCacheMemoryUsage *prometheus.Desc
serviceCacheOutputCacheQueriesTotal *prometheus.Desc
serviceCacheOutputCacheHitsTotal *prometheus.Desc
serviceCacheOutputCacheFlushedItemsTotal *prometheus.Desc
serviceCacheOutputCacheFlushesTotal *prometheus.Desc
}
const (
ServiceCacheActiveFlushedEntries = "Active Flushed Entries"
ServiceCacheCurrentFileCacheMemoryUsage = "Current File Cache Memory Usage"
ServiceCacheMaximumFileCacheMemoryUsage = "Maximum File Cache Memory Usage"
ServiceCacheFileCacheFlushesTotal = "File Cache Flushes"
ServiceCacheFileCacheHitsTotal = "File Cache Hits"
ServiceCacheFileCacheMissesTotal = "File Cache Misses"
ServiceCacheFilesCached = "Current Files Cached"
ServiceCacheFilesCachedTotal = "Total Files Cached"
ServiceCacheFilesFlushedTotal = "Total Flushed Files"
ServiceCacheURICacheFlushesTotal = "Total Flushed URIs"
ServiceCacheURICacheFlushesTotalKernel = "Total Flushed URIs"
ServiceCacheURIsFlushedTotalKernel = "Kernel: Total Flushed URIs"
ServiceCacheURICacheHitsTotal = "URI Cache Hits"
ServiceCacheURICacheHitsTotalKernel = "Kernel: URI Cache Hits"
ServiceCacheURICacheMissesTotal = "URI Cache Misses"
ServiceCacheURICacheMissesTotalKernel = "Kernel: URI Cache Misses"
ServiceCacheURIsCached = "Current URIs Cached"
ServiceCacheURIsCachedKernel = "Kernel: Current URIs Cached"
ServiceCacheURIsCachedTotal = "Total URIs Cached"
ServiceCacheURIsCachedTotalKernel = "Total URIs Cached"
ServiceCacheURIsFlushedTotal = "Total Flushed URIs"
ServiceCacheMetaDataCacheHits = "Metadata Cache Hits"
ServiceCacheMetaDataCacheMisses = "Metadata Cache Misses"
ServiceCacheMetadataCached = "Current Metadata Cached"
ServiceCacheMetadataCacheFlushes = "Metadata Cache Flushes"
ServiceCacheMetadataCachedTotal = "Total Metadata Cached"
ServiceCacheMetadataFlushedTotal = "Total Flushed Metadata"
ServiceCacheOutputCacheActiveFlushedItems = "Output Cache Current Flushed Items"
ServiceCacheOutputCacheItems = "Output Cache Current Items"
ServiceCacheOutputCacheMemoryUsage = "Output Cache Current Memory Usage"
ServiceCacheOutputCacheHitsTotal = "Output Cache Total Hits"
ServiceCacheOutputCacheMissesTotal = "Output Cache Total Misses"
ServiceCacheOutputCacheFlushedItemsTotal = "Output Cache Total Flushed Items"
ServiceCacheOutputCacheFlushesTotal = "Output Cache Total Flushes"
)
func (c *Collector) buildWebServiceCache() error {
var err error
c.perfDataCollectorWebService, err = perfdata.NewCollector("Web Service Cache", perfdata.InstanceAll, []string{
ServiceCacheActiveFlushedEntries,
ServiceCacheCurrentFileCacheMemoryUsage,
ServiceCacheMaximumFileCacheMemoryUsage,
ServiceCacheFileCacheFlushesTotal,
ServiceCacheFileCacheHitsTotal,
ServiceCacheFileCacheMissesTotal,
ServiceCacheFilesCached,
ServiceCacheFilesCachedTotal,
ServiceCacheFilesFlushedTotal,
ServiceCacheURICacheFlushesTotal,
ServiceCacheURICacheFlushesTotalKernel,
ServiceCacheURIsFlushedTotalKernel,
ServiceCacheURICacheHitsTotal,
ServiceCacheURICacheHitsTotalKernel,
ServiceCacheURICacheMissesTotal,
ServiceCacheURICacheMissesTotalKernel,
ServiceCacheURIsCached,
ServiceCacheURIsCachedKernel,
ServiceCacheURIsCachedTotal,
ServiceCacheURIsCachedTotalKernel,
ServiceCacheURIsFlushedTotal,
ServiceCacheMetaDataCacheHits,
ServiceCacheMetaDataCacheMisses,
ServiceCacheMetadataCached,
ServiceCacheMetadataCacheFlushes,
ServiceCacheMetadataCachedTotal,
ServiceCacheMetadataFlushedTotal,
ServiceCacheOutputCacheActiveFlushedItems,
ServiceCacheOutputCacheItems,
ServiceCacheOutputCacheMemoryUsage,
ServiceCacheOutputCacheHitsTotal,
ServiceCacheOutputCacheMissesTotal,
ServiceCacheOutputCacheFlushedItemsTotal,
ServiceCacheOutputCacheFlushesTotal,
})
if err != nil {
return fmt.Errorf("failed to create Web Service Cache collector: %w", err)
}
// Web Service Cache
c.serviceCacheActiveFlushedEntries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_cache_active_flushed_entries"),
"Number of file handles cached that will be closed when all current transfers complete.",
nil,
nil,
)
c.serviceCacheCurrentFileCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_memory_bytes"),
"Current number of bytes used by file cache",
nil,
nil,
)
c.serviceCacheMaximumFileCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_max_memory_bytes"),
"Maximum number of bytes used by file cache",
nil,
nil,
)
c.serviceCacheFileCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_flushes_total"),
"Total number of file cache flushes (since service startup)",
nil,
nil,
)
c.serviceCacheFileCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_queries_total"),
"Total number of file cache queries (hits + misses)",
nil,
nil,
)
c.serviceCacheFileCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_hits_total"),
"Total number of successful lookups in the user-mode file cache",
nil,
nil,
)
c.serviceCacheFilesCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_items"),
"Current number of files whose contents are present in cache",
nil,
nil,
)
c.serviceCacheFilesCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_items_total"),
"Total number of files whose contents were ever added to the cache (since service startup)",
nil,
nil,
)
c.serviceCacheFilesFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_file_cache_items_flushed_total"),
"Total number of file handles that have been removed from the cache (since service startup)",
nil,
nil,
)
c.serviceCacheURICacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_flushes_total"),
"Total number of URI cache flushes (since service startup)",
[]string{"mode"},
nil,
)
c.serviceCacheURICacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_queries_total"),
"Total number of uri cache queries (hits + misses)",
[]string{"mode"},
nil,
)
c.serviceCacheURICacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_hits_total"),
"Total number of successful lookups in the URI cache (since service startup)",
[]string{"mode"},
nil,
)
c.serviceCacheURIsCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_items"),
"Number of URI information blocks currently in the cache",
[]string{"mode"},
nil,
)
c.serviceCacheURIsCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_items_total"),
"Total number of URI information blocks added to the cache (since service startup)",
[]string{"mode"},
nil,
)
c.serviceCacheURIsFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_uri_cache_items_flushed_total"),
"The number of URI information blocks that have been removed from the cache (since service startup)",
[]string{"mode"},
nil,
)
c.serviceCacheMetadataCached = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_items"),
"Number of metadata information blocks currently present in cache",
nil,
nil,
)
c.serviceCacheMetadataCacheFlushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_flushes_total"),
"Total number of metadata cache flushes (since service startup)",
nil,
nil,
)
c.serviceCacheMetadataCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_queries_total"),
"Total metadata cache queries (hits + misses)",
nil,
nil,
)
c.serviceCacheMetadataCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_hits_total"),
"Total number of successful lookups in the metadata cache (since service startup)",
nil,
nil,
)
c.serviceCacheMetadataCachedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_items_cached_total"),
"Total number of metadata information blocks added to the cache (since service startup)",
nil,
nil,
)
c.serviceCacheMetadataFlushedTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_metadata_cache_items_flushed_total"),
"Total number of metadata information blocks removed from the cache (since service startup)",
nil,
nil,
)
c.serviceCacheOutputCacheActiveFlushedItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_active_flushed_items"),
"",
nil,
nil,
)
c.serviceCacheOutputCacheItems = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_items"),
"Number of items current present in output cache",
nil,
nil,
)
c.serviceCacheOutputCacheMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_memory_bytes"),
"Current number of bytes used by output cache",
nil,
nil,
)
c.serviceCacheOutputCacheQueriesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_queries_total"),
"Total output cache queries (hits + misses)",
nil,
nil,
)
c.serviceCacheOutputCacheHitsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_hits_total"),
"Total number of successful lookups in output cache (since service startup)",
nil,
nil,
)
c.serviceCacheOutputCacheFlushedItemsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_items_flushed_total"),
"Total number of items flushed from output cache (since service startup)",
nil,
nil,
)
c.serviceCacheOutputCacheFlushesTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "server_output_cache_flushes_total"),
"Total number of flushes of output cache (since service startup)",
nil,
nil,
)
return nil
}
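The descriptor names above are assembled with prometheus.BuildFQName, which joins namespace, subsystem and name with underscores. A minimal, self-contained sketch of that assembly, assuming types.Namespace is "windows" and this collector's Name is "iis" (both constants are defined elsewhere in the repository):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	namespace = "windows" // assumed value of types.Namespace
	subsystem = "iis"     // assumed value of Name
)

func main() {
	// BuildFQName joins the non-empty parts with underscores.
	fqName := prometheus.BuildFQName(namespace, subsystem, "server_output_cache_items")
	fmt.Println(fqName) // windows_iis_server_output_cache_items

	desc := prometheus.NewDesc(fqName, "Number of items currently present in output cache", nil, nil)
	fmt.Println(desc)
}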
func (c *Collector) collectWebServiceCache(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollectorWebService.Collect()
if err != nil {
return fmt.Errorf("failed to collect Web Service Cache metrics: %w", err)
}
deduplicateIISNames(perfData)
for name, app := range perfData {
if c.config.SiteExclude.MatchString(name) || !c.config.SiteInclude.MatchString(name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.serviceCacheActiveFlushedEntries,
prometheus.GaugeValue,
app[ServiceCacheActiveFlushedEntries].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
app[ServiceCacheCurrentFileCacheMemoryUsage].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
app[ServiceCacheMaximumFileCacheMemoryUsage].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheFlushesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheHitsTotal].FirstValue+app[ServiceCacheFileCacheMissesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheFileCacheHitsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCached,
prometheus.GaugeValue,
app[ServiceCacheFilesCached].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCachedTotal,
prometheus.CounterValue,
app[ServiceCacheFilesCachedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheFilesFlushedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheFlushesTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheFlushesTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotal].FirstValue+app[ServiceCacheURICacheMissesTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotalKernel].FirstValue+app[ServiceCacheURICacheMissesTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheURICacheHitsTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
app[ServiceCacheURIsCached].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
app[ServiceCacheURIsCachedKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsCachedTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsCachedTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsFlushedTotal].FirstValue,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheURIsFlushedTotalKernel].FirstValue,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCached,
prometheus.GaugeValue,
app[ServiceCacheMetadataCached].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheFlushes,
prometheus.CounterValue,
app[ServiceCacheMetadataCacheFlushes].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheMetaDataCacheHits].FirstValue+app[ServiceCacheMetaDataCacheMisses].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheHitsTotal,
prometheus.CounterValue,
0, // app[ServiceCacheMetadataCacheHitsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCachedTotal,
prometheus.CounterValue,
app[ServiceCacheMetadataCachedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataFlushedTotal,
prometheus.CounterValue,
app[ServiceCacheMetadataFlushedTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheActiveFlushedItems,
prometheus.CounterValue,
app[ServiceCacheOutputCacheActiveFlushedItems].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheItems,
prometheus.CounterValue,
app[ServiceCacheOutputCacheItems].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheMemoryUsage,
prometheus.CounterValue,
app[ServiceCacheOutputCacheMemoryUsage].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheQueriesTotal,
prometheus.CounterValue,
app[ServiceCacheOutputCacheHitsTotal].FirstValue+app[ServiceCacheOutputCacheMissesTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheHitsTotal,
prometheus.CounterValue,
app[ServiceCacheOutputCacheHitsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
app[ServiceCacheOutputCacheFlushedItemsTotal].FirstValue,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushesTotal,
prometheus.CounterValue,
app[ServiceCacheOutputCacheFlushesTotal].FirstValue,
)
}
return nil
}
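The loop above folds kernel- and user-mode counters into a single descriptor distinguished by a "mode" label, and derives each *_queries_total series as hits + misses because the performance object only exposes the two components. A minimal sketch of that pattern, with hypothetical raw values and an assumed final metric name:

package sketch

import "github.com/prometheus/client_golang/prometheus"

// uriCacheCounters holds hypothetical raw counter values read from the
// Web Service Cache performance object.
type uriCacheCounters struct {
	hitsUser, missesUser     float64
	hitsKernel, missesKernel float64
}

var uriCacheQueriesTotal = prometheus.NewDesc(
	"windows_iis_server_uri_cache_queries_total", // assumed fully qualified name
	"Total number of uri cache queries (hits + misses)",
	[]string{"mode"},
	nil,
)

// emitURICacheQueries publishes the same logical metric twice, once per mode,
// reconstructing the total from its hit and miss components.
func emitURICacheQueries(ch chan<- prometheus.Metric, c uriCacheCounters) {
	ch <- prometheus.MustNewConstMetric(uriCacheQueriesTotal, prometheus.CounterValue, c.hitsUser+c.missesUser, "user")
	ch <- prometheus.MustNewConstMetric(uriCacheQueriesTotal, prometheus.CounterValue, c.hitsKernel+c.missesKernel, "kernel")
}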

View File

@ -53,11 +53,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -74,20 +70,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting license metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
status, err := slc.SLIsWindowsGenuineLocal()
if err != nil {
return err

View File

@ -1,10 +1,12 @@
//go:build windows
package license_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/license"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package logical_disk
const (
@ -18,26 +20,3 @@ const (
percentIdleTime = "% Idle Time"
splitIOPerSec = "Split IO/Sec"
)
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference.
type logicalDisk struct {
Name string
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
PercentIdleTime float64 `perflib:"% Idle Time"`
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
}

View File

@ -14,9 +14,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@ -37,8 +34,9 @@ var ConfigDefaults = Config{
// A Collector is a Prometheus Collector for perflib logicalDisk metrics.
type Collector struct {
config Config
logger *slog.Logger
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc
@ -128,45 +126,35 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"LogicalDisk"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
currentDiskQueueLength,
avgDiskReadQueueLength,
avgDiskWriteQueueLength,
diskReadBytesPerSec,
diskReadsPerSec,
diskWriteBytesPerSec,
diskWritesPerSec,
percentDiskReadTime,
percentDiskWriteTime,
percentFreeSpace,
freeSpace,
percentIdleTime,
splitIOPerSec,
avgDiskSecPerRead,
avgDiskSecPerWrite,
avgDiskSecPerTransfer,
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "LogicalDisk", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("LogicalDisk", perfdata.InstanceAll, []string{
currentDiskQueueLength,
avgDiskReadQueueLength,
avgDiskWriteQueueLength,
diskReadBytesPerSec,
diskReadsPerSec,
diskWriteBytesPerSec,
diskWritesPerSec,
percentDiskReadTime,
percentDiskWriteTime,
percentFreeSpace,
freeSpace,
percentIdleTime,
splitIOPerSec,
avgDiskSecPerRead,
avgDiskSecPerWrite,
avgDiskSecPerTransfer,
})
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
c.information = prometheus.NewDesc(
@ -298,25 +286,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if toggle.IsPDHEnabled() {
return c.collectPDH(logger, ch)
}
if err := c.collect(ctx, logger, ch); err != nil {
logger.Error("failed collecting logical_disk metrics",
slog.Any("err", err),
)
return err
}
return nil
}
func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var (
err error
diskID string
@ -337,14 +307,14 @@ func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric)
diskID, err = getDiskIDByVolume(name)
if err != nil {
logger.Warn("failed to get disk ID for "+name,
c.logger.Warn("failed to get disk ID for "+name,
slog.Any("err", err),
)
}
info, err = getVolumeInfo(name)
if err != nil {
logger.Warn("failed to get volume information for "+name,
c.logger.Warn("failed to get volume information for "+name,
slog.Any("err", err),
)
}
@ -371,14 +341,14 @@ func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
volume[avgDiskReadQueueLength].FirstValue*perftypes.TicksToSecondScaleFactor,
volume[avgDiskReadQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
volume[avgDiskWriteQueueLength].FirstValue*perftypes.TicksToSecondScaleFactor,
volume[avgDiskWriteQueueLength].FirstValue*perfdata.TicksToSecondScaleFactor,
name,
)
@ -455,21 +425,21 @@ func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
volume[avgDiskSecPerRead].FirstValue*perftypes.TicksToSecondScaleFactor,
volume[avgDiskSecPerRead].FirstValue*perfdata.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
volume[avgDiskSecPerWrite].FirstValue*perftypes.TicksToSecondScaleFactor,
volume[avgDiskSecPerWrite].FirstValue*perfdata.TicksToSecondScaleFactor,
name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
volume[avgDiskSecPerTransfer].FirstValue*perftypes.TicksToSecondScaleFactor,
volume[avgDiskSecPerTransfer].FirstValue*perfdata.TicksToSecondScaleFactor,
name,
)
}
@ -477,167 +447,6 @@ func (c *Collector) collectPDH(logger *slog.Logger, ch chan<- prometheus.Metric)
return nil
}
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var (
err error
diskID string
info volumeInfo
dst []logicalDisk
)
if err = v1.UnmarshalObject(ctx.PerfObjects["LogicalDisk"], &dst, logger); err != nil {
return err
}
for _, volume := range dst {
if volume.Name == "_Total" ||
c.config.VolumeExclude.MatchString(volume.Name) ||
!c.config.VolumeInclude.MatchString(volume.Name) {
continue
}
diskID, err = getDiskIDByVolume(volume.Name)
if err != nil {
logger.Warn("failed to get disk ID for "+volume.Name,
slog.Any("err", err),
)
}
info, err = getVolumeInfo(volume.Name)
if err != nil {
logger.Warn("failed to get volume information for %s"+volume.Name,
slog.Any("err", err),
)
}
ch <- prometheus.MustNewConstMetric(
c.information,
prometheus.GaugeValue,
1,
diskID,
info.volumeType,
volume.Name,
info.label,
info.filesystem,
info.serialNumber,
)
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
volume.CurrentDiskQueueLength,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
volume.AvgDiskReadQueueLength*perftypes.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
volume.AvgDiskWriteQueueLength*perftypes.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
volume.DiskReadBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
volume.DiskReadsPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
volume.DiskWriteBytesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
volume.DiskWritesPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
volume.PercentDiskReadTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
volume.PercentDiskWriteTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.freeSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace_Base*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalSpace,
prometheus.GaugeValue,
volume.PercentFreeSpace*1024*1024,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
volume.PercentIdleTime,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
volume.SplitIOPerSec,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerRead*perftypes.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerWrite*perftypes.TicksToSecondScaleFactor,
volume.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
volume.AvgDiskSecPerTransfer*perftypes.TicksToSecondScaleFactor,
volume.Name,
)
}
return nil
}
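Both the new and the removed code scale the Avg. Disk sec/Read, sec/Write and sec/Transfer counters by TicksToSecondScaleFactor before exporting them, because the raw values arrive in performance-counter ticks rather than seconds. A minimal sketch of that conversion, assuming the factor resolves to 1e-7 (100 ns ticks):

package sketch

// ticksToSecondScaleFactor mirrors perfdata.TicksToSecondScaleFactor; the
// value here is an assumption for illustration.
const ticksToSecondScaleFactor = 1e-7

// readLatencySeconds converts a raw "Avg. Disk sec/Read" counter value into
// the seconds-based value published through the readLatency descriptor.
func readLatencySeconds(rawTicks float64) float64 {
	// e.g. 2_500_000 ticks -> 0.25 s of cumulative read latency
	return rawTicks * ticksToSecondScaleFactor
}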
func getDriveType(driveType uint32) string {
switch driveType {
case windows.DRIVE_UNKNOWN:

View File

@ -1,3 +1,5 @@
//go:build windows
package logical_disk_test
import (
@ -5,8 +7,8 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/collector/logical_disk"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -46,11 +46,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -67,15 +63,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
if err := c.collect(ch); err != nil {
return err
}
return nil
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
logonSessions, err := secur32.GetLogonSessions()
if err != nil {
return fmt.Errorf("failed to get logon sessions: %w", err)

View File

@ -1,10 +1,12 @@
//go:build windows
package logon_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/logon"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package memory
const (
@ -36,40 +38,3 @@ const (
transitionPagesRePurposedPerSec = "Transition Pages RePurposed/sec"
writeCopiesPerSec = "Write Copies/sec"
)
type memory struct {
AvailableBytes float64 `perflib:"Available Bytes"`
AvailableKBytes float64 `perflib:"Available KBytes"`
AvailableMBytes float64 `perflib:"Available MBytes"`
CacheBytes float64 `perflib:"Cache Bytes"`
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
CacheFaultsPerSec float64 `perflib:"Cache Faults/sec"`
CommitLimit float64 `perflib:"Commit Limit"`
CommittedBytes float64 `perflib:"Committed Bytes"`
DemandZeroFaultsPerSec float64 `perflib:"Demand Zero Faults/sec"`
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
PageFaultsPerSec float64 `perflib:"Page Faults/sec"`
PageReadsPerSec float64 `perflib:"Page Reads/sec"`
PagesInputPerSec float64 `perflib:"Pages Input/sec"`
PagesOutputPerSec float64 `perflib:"Pages Output/sec"`
PagesPerSec float64 `perflib:"Pages/sec"`
PageWritesPerSec float64 `perflib:"Page Writes/sec"`
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
TransitionFaultsPerSec float64 `perflib:"Transition Faults/sec"`
TransitionPagesRePurposedPerSec float64 `perflib:"Transition Pages RePurposed/sec"`
WriteCopiesPerSec float64 `perflib:"Write Copies/sec"`
}

View File

@ -14,9 +14,6 @@ import (
"github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/perfdata/perftypes"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -31,7 +28,7 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
// Performance metrics
availableBytes *prometheus.Desc
@ -94,62 +91,56 @@ func (c *Collector) GetName() string {
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"Memory"}, nil
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
counters := []string{
availableBytes,
availableKBytes,
availableMBytes,
cacheBytes,
cacheBytesPeak,
cacheFaultsPerSec,
commitLimit,
committedBytes,
demandZeroFaultsPerSec,
freeAndZeroPageListBytes,
freeSystemPageTableEntries,
modifiedPageListBytes,
pageFaultsPerSec,
pageReadsPerSec,
pagesInputPerSec,
pagesOutputPerSec,
pagesPerSec,
pageWritesPerSec,
poolNonpagedAllocs,
poolNonpagedBytes,
poolPagedAllocs,
poolPagedBytes,
poolPagedResidentBytes,
standbyCacheCoreBytes,
standbyCacheNormalPriorityBytes,
standbyCacheReserveBytes,
systemCacheResidentBytes,
systemCodeResidentBytes,
systemCodeTotalBytes,
systemDriverResidentBytes,
systemDriverTotalBytes,
transitionFaultsPerSec,
transitionPagesRePurposedPerSec,
writeCopiesPerSec,
}
counters := []string{
availableBytes,
availableKBytes,
availableMBytes,
cacheBytes,
cacheBytesPeak,
cacheFaultsPerSec,
commitLimit,
committedBytes,
demandZeroFaultsPerSec,
freeAndZeroPageListBytes,
freeSystemPageTableEntries,
modifiedPageListBytes,
pageFaultsPerSec,
pageReadsPerSec,
pagesInputPerSec,
pagesOutputPerSec,
pagesPerSec,
pageWritesPerSec,
poolNonpagedAllocs,
poolNonpagedBytes,
poolPagedAllocs,
poolPagedBytes,
poolPagedResidentBytes,
standbyCacheCoreBytes,
standbyCacheNormalPriorityBytes,
standbyCacheReserveBytes,
systemCacheResidentBytes,
systemCodeResidentBytes,
systemCodeTotalBytes,
systemDriverResidentBytes,
systemDriverTotalBytes,
transitionFaultsPerSec,
transitionPagesRePurposedPerSec,
writeCopiesPerSec,
}
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Memory", perfdata.AllInstances, counters)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("Memory", perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Memory collector: %w", err)
}
c.availableBytes = prometheus.NewDesc(
@ -380,32 +371,15 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
var err error
if toggle.IsPDHEnabled() {
err = c.collectPDH(ch)
} else {
err = c.collectPerformanceData(ctx, logger, ch)
}
if err != nil {
logger.Error("failed collecting memory metrics",
slog.Any("err", err),
)
errs = append(errs, err)
if err := c.collectPDH(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting memory metrics: %w", err))
}
if err := c.collectGlobalMemoryStatus(ch); err != nil {
logger.Error("failed collecting memory metrics",
slog.Any("err", err),
)
errs = append(errs, err)
errs = append(errs, fmt.Errorf("failed collecting global memory metrics: %w", err))
}
return errors.Join(errs...)
@ -438,217 +412,13 @@ func (c *Collector) collectGlobalMemoryStatus(ch chan<- prometheus.Metric) error
return nil
}
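Collect now records failures from the PDH path and the global-memory path separately and returns them joined, so one failing source no longer hides the other. A minimal sketch of that aggregation pattern with errors.Join:

package sketch

import (
	"errors"
	"fmt"
)

// collectAll runs every sub-collector, keeps the failures, and reports them
// as a single joined error; errors.Join returns nil when the slice is empty.
func collectAll(steps ...func() error) error {
	errs := make([]error, 0, len(steps))

	for i, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, fmt.Errorf("step %d failed: %w", i, err))
		}
	}

	return errors.Join(errs...)
}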
func (c *Collector) collectPerformanceData(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
var dst []memory
if err := v1.UnmarshalObject(ctx.PerfObjects["Memory"], &dst, logger); err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.availableBytes,
prometheus.GaugeValue,
dst[0].AvailableBytes,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytes,
prometheus.GaugeValue,
dst[0].CacheBytes,
)
ch <- prometheus.MustNewConstMetric(
c.cacheBytesPeak,
prometheus.GaugeValue,
dst[0].CacheBytesPeak,
)
ch <- prometheus.MustNewConstMetric(
c.cacheFaultsTotal,
prometheus.CounterValue,
dst[0].CacheFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.commitLimit,
prometheus.GaugeValue,
dst[0].CommitLimit,
)
ch <- prometheus.MustNewConstMetric(
c.committedBytes,
prometheus.GaugeValue,
dst[0].CommittedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.demandZeroFaultsTotal,
prometheus.CounterValue,
dst[0].DemandZeroFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.freeAndZeroPageListBytes,
prometheus.GaugeValue,
dst[0].FreeAndZeroPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.freeSystemPageTableEntries,
prometheus.GaugeValue,
dst[0].FreeSystemPageTableEntries,
)
ch <- prometheus.MustNewConstMetric(
c.modifiedPageListBytes,
prometheus.GaugeValue,
dst[0].ModifiedPageListBytes,
)
ch <- prometheus.MustNewConstMetric(
c.pageFaultsTotal,
prometheus.CounterValue,
dst[0].PageFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageReadsTotal,
prometheus.CounterValue,
dst[0].PageReadsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesReadTotal,
prometheus.CounterValue,
dst[0].PagesInputPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPagesWrittenTotal,
prometheus.CounterValue,
dst[0].PagesOutputPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageOperationsTotal,
prometheus.CounterValue,
dst[0].PagesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.swapPageWritesTotal,
prometheus.CounterValue,
dst[0].PageWritesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedAllocationsTotal,
prometheus.GaugeValue,
dst[0].PoolNonpagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.poolNonPagedBytes,
prometheus.GaugeValue,
dst[0].PoolNonpagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedAllocationsTotal,
prometheus.CounterValue,
dst[0].PoolPagedAllocs,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedBytes,
prometheus.GaugeValue,
dst[0].PoolPagedBytes,
)
ch <- prometheus.MustNewConstMetric(
c.poolPagedResidentBytes,
prometheus.GaugeValue,
dst[0].PoolPagedResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheCoreBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheCoreBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheNormalPriorityBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheNormalPriorityBytes,
)
ch <- prometheus.MustNewConstMetric(
c.standbyCacheReserveBytes,
prometheus.GaugeValue,
dst[0].StandbyCacheReserveBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCacheResidentBytes,
prometheus.GaugeValue,
dst[0].SystemCacheResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeResidentBytes,
prometheus.GaugeValue,
dst[0].SystemCodeResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemCodeTotalBytes,
prometheus.GaugeValue,
dst[0].SystemCodeTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverResidentBytes,
prometheus.GaugeValue,
dst[0].SystemDriverResidentBytes,
)
ch <- prometheus.MustNewConstMetric(
c.systemDriverTotalBytes,
prometheus.GaugeValue,
dst[0].SystemDriverTotalBytes,
)
ch <- prometheus.MustNewConstMetric(
c.transitionFaultsTotal,
prometheus.CounterValue,
dst[0].TransitionFaultsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.transitionPagesRepurposedTotal,
prometheus.CounterValue,
dst[0].TransitionPagesRePurposedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.writeCopiesTotal,
prometheus.CounterValue,
dst[0].WriteCopiesPerSec,
)
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
perfData, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Memory metrics: %w", err)
}
data, ok := perfData[perftypes.EmptyInstance]
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return errors.New("perflib query for Memory returned empty result set")

View File

@ -1,10 +1,12 @@
//go:build windows
package memory_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/memory"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (
@ -9,7 +11,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -217,7 +218,7 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{"Memory"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -257,7 +258,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
if len(c.config.CollectorsEnabled) == 0 {
return nil
}

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (

View File

@ -1,3 +1,5 @@
//go:build windows
package mscluster
import (

View File

@ -68,11 +68,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -117,21 +113,6 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.collect(ch); err != nil {
logger.Error("failed collecting msmq metrics",
slog.Any("err", err),
)
return err
}
return nil
}
type msmqQueue struct {
Name string `mi:"Name"`
@ -141,7 +122,9 @@ type msmqQueue struct {
MessagesInQueue uint64 `mi:"MessagesInQueue"`
}
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var dst []msmqQueue
query := "SELECT * FROM Win32_PerfRawData_MSMQ_MSMQQueue"

View File

@ -1,10 +1,12 @@
//go:build windows
package msmq_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/msmq"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

File diff suppressed because it is too large

View File

@ -0,0 +1,766 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorAccessMethods struct {
accessMethodsPerfDataCollectors map[string]*perfdata.Collector
accessMethodsAUcleanupbatches *prometheus.Desc
accessMethodsAUcleanups *prometheus.Desc
accessMethodsByReferenceLobCreateCount *prometheus.Desc
accessMethodsByReferenceLobUseCount *prometheus.Desc
accessMethodsCountLobReadahead *prometheus.Desc
accessMethodsCountPullInRow *prometheus.Desc
accessMethodsCountPushOffRow *prometheus.Desc
accessMethodsDeferreddroppedAUs *prometheus.Desc
accessMethodsDeferredDroppedrowsets *prometheus.Desc
accessMethodsDroppedrowsetcleanups *prometheus.Desc
accessMethodsDroppedrowsetsskipped *prometheus.Desc
accessMethodsExtentDeallocations *prometheus.Desc
accessMethodsExtentsAllocated *prometheus.Desc
accessMethodsFailedAUcleanupbatches *prometheus.Desc
accessMethodsFailedleafpagecookie *prometheus.Desc
accessMethodsFailedtreepagecookie *prometheus.Desc
accessMethodsForwardedRecords *prometheus.Desc
accessMethodsFreeSpacePageFetches *prometheus.Desc
accessMethodsFreeSpaceScans *prometheus.Desc
accessMethodsFullScans *prometheus.Desc
accessMethodsIndexSearches *prometheus.Desc
accessMethodsInSysXactwaits *prometheus.Desc
accessMethodsLobHandleCreateCount *prometheus.Desc
accessMethodsLobHandleDestroyCount *prometheus.Desc
accessMethodsLobSSProviderCreateCount *prometheus.Desc
accessMethodsLobSSProviderDestroyCount *prometheus.Desc
accessMethodsLobSSProviderTruncationCount *prometheus.Desc
accessMethodsMixedPageAllocations *prometheus.Desc
accessMethodsPageCompressionAttempts *prometheus.Desc
accessMethodsPageDeallocations *prometheus.Desc
accessMethodsPagesAllocated *prometheus.Desc
accessMethodsPagesCompressed *prometheus.Desc
accessMethodsPageSplits *prometheus.Desc
accessMethodsProbeScans *prometheus.Desc
accessMethodsRangeScans *prometheus.Desc
accessMethodsScanPointRevalidations *prometheus.Desc
accessMethodsSkippedGhostedRecords *prometheus.Desc
accessMethodsTableLockEscalations *prometheus.Desc
accessMethodsUsedleafpagecookie *prometheus.Desc
accessMethodsUsedtreepagecookie *prometheus.Desc
accessMethodsWorkfilesCreated *prometheus.Desc
accessMethodsWorktablesCreated *prometheus.Desc
accessMethodsWorktablesFromCacheHits *prometheus.Desc
accessMethodsWorktablesFromCacheLookups *prometheus.Desc
}
const (
accessMethodsAUCleanupbatchesPerSec = "AU cleanup batches/sec"
accessMethodsAUCleanupsPerSec = "AU cleanups/sec"
accessMethodsByReferenceLobCreateCount = "By-reference Lob Create Count"
accessMethodsByReferenceLobUseCount = "By-reference Lob Use Count"
accessMethodsCountLobReadahead = "Count Lob Readahead"
accessMethodsCountPullInRow = "Count Pull In Row"
accessMethodsCountPushOffRow = "Count Push Off Row"
accessMethodsDeferredDroppedAUs = "Deferred dropped AUs"
accessMethodsDeferredDroppedRowsets = "Deferred Dropped rowsets"
accessMethodsDroppedRowsetCleanupsPerSec = "Dropped rowset cleanups/sec"
accessMethodsDroppedRowsetsSkippedPerSec = "Dropped rowsets skipped/sec"
accessMethodsExtentDeallocationsPerSec = "Extent Deallocations/sec"
accessMethodsExtentsAllocatedPerSec = "Extents Allocated/sec"
accessMethodsFailedAUCleanupBatchesPerSec = "Failed AU cleanup batches/sec"
accessMethodsFailedLeafPageCookie = "Failed leaf page cookie"
accessMethodsFailedTreePageCookie = "Failed tree page cookie"
accessMethodsForwardedRecordsPerSec = "Forwarded Records/sec"
accessMethodsFreeSpacePageFetchesPerSec = "FreeSpace Page Fetches/sec"
accessMethodsFreeSpaceScansPerSec = "FreeSpace Scans/sec"
accessMethodsFullScansPerSec = "Full Scans/sec"
accessMethodsIndexSearchesPerSec = "Index Searches/sec"
accessMethodsInSysXactWaitsPerSec = "InSysXact waits/sec"
accessMethodsLobHandleCreateCount = "LobHandle Create Count"
accessMethodsLobHandleDestroyCount = "LobHandle Destroy Count"
accessMethodsLobSSProviderCreateCount = "LobSS Provider Create Count"
accessMethodsLobSSProviderDestroyCount = "LobSS Provider Destroy Count"
accessMethodsLobSSProviderTruncationCount = "LobSS Provider Truncation Count"
accessMethodsMixedPageAllocationsPerSec = "Mixed page allocations/sec"
accessMethodsPageCompressionAttemptsPerSec = "Page compression attempts/sec"
accessMethodsPageDeallocationsPerSec = "Page Deallocations/sec"
accessMethodsPagesAllocatedPerSec = "Pages Allocated/sec"
accessMethodsPagesCompressedPerSec = "Pages compressed/sec"
accessMethodsPageSplitsPerSec = "Page Splits/sec"
accessMethodsProbeScansPerSec = "Probe Scans/sec"
accessMethodsRangeScansPerSec = "Range Scans/sec"
accessMethodsScanPointRevalidationsPerSec = "Scan Point Revalidations/sec"
accessMethodsSkippedGhostedRecordsPerSec = "Skipped Ghosted Records/sec"
accessMethodsTableLockEscalationsPerSec = "Table Lock Escalations/sec"
accessMethodsUsedLeafPageCookie = "Used leaf page cookie"
accessMethodsUsedTreePageCookie = "Used tree page cookie"
accessMethodsWorkfilesCreatedPerSec = "Workfiles Created/sec"
accessMethodsWorktablesCreatedPerSec = "Worktables Created/sec"
accessMethodsWorktablesFromCacheRatio = "Worktables From Cache Ratio"
accessMethodsWorktablesFromCacheRatioBase = "Worktables From Cache Base"
)
func (c *Collector) buildAccessMethods() error {
var err error
c.accessMethodsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
accessMethodsAUCleanupbatchesPerSec,
accessMethodsAUCleanupsPerSec,
accessMethodsByReferenceLobCreateCount,
accessMethodsByReferenceLobUseCount,
accessMethodsCountLobReadahead,
accessMethodsCountPullInRow,
accessMethodsCountPushOffRow,
accessMethodsDeferredDroppedAUs,
accessMethodsDeferredDroppedRowsets,
accessMethodsDroppedRowsetCleanupsPerSec,
accessMethodsDroppedRowsetsSkippedPerSec,
accessMethodsExtentDeallocationsPerSec,
accessMethodsExtentsAllocatedPerSec,
accessMethodsFailedAUCleanupBatchesPerSec,
accessMethodsFailedLeafPageCookie,
accessMethodsFailedTreePageCookie,
accessMethodsForwardedRecordsPerSec,
accessMethodsFreeSpacePageFetchesPerSec,
accessMethodsFreeSpaceScansPerSec,
accessMethodsFullScansPerSec,
accessMethodsIndexSearchesPerSec,
accessMethodsInSysXactWaitsPerSec,
accessMethodsLobHandleCreateCount,
accessMethodsLobHandleDestroyCount,
accessMethodsLobSSProviderCreateCount,
accessMethodsLobSSProviderDestroyCount,
accessMethodsLobSSProviderTruncationCount,
accessMethodsMixedPageAllocationsPerSec,
accessMethodsPageCompressionAttemptsPerSec,
accessMethodsPageDeallocationsPerSec,
accessMethodsPagesAllocatedPerSec,
accessMethodsPagesCompressedPerSec,
accessMethodsPageSplitsPerSec,
accessMethodsProbeScansPerSec,
accessMethodsRangeScansPerSec,
accessMethodsScanPointRevalidationsPerSec,
accessMethodsSkippedGhostedRecordsPerSec,
accessMethodsTableLockEscalationsPerSec,
accessMethodsUsedLeafPageCookie,
accessMethodsUsedTreePageCookie,
accessMethodsWorkfilesCreatedPerSec,
accessMethodsWorktablesCreatedPerSec,
accessMethodsWorktablesFromCacheRatio,
accessMethodsWorktablesFromCacheRatioBase,
}
for sqlInstance := range c.mssqlInstances {
c.accessMethodsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance, err)
}
}
// Win32_PerfRawData_{instance}_SQLServerAccessMethods
c.accessMethodsAUcleanupbatches = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_au_batch_cleanups"),
"(AccessMethods.AUcleanupbatches)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsAUcleanups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_au_cleanups"),
"(AccessMethods.AUcleanups)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsByReferenceLobCreateCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_by_reference_lob_creates"),
"(AccessMethods.ByreferenceLobCreateCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsByReferenceLobUseCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_by_reference_lob_uses"),
"(AccessMethods.ByreferenceLobUseCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsCountLobReadahead = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_read_aheads"),
"(AccessMethods.CountLobReadahead)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsCountPullInRow = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_column_value_pulls"),
"(AccessMethods.CountPullInRow)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsCountPushOffRow = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_column_value_pushes"),
"(AccessMethods.CountPushOffRow)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsDeferreddroppedAUs = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_deferred_dropped_aus"),
"(AccessMethods.DeferreddroppedAUs)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsDeferredDroppedrowsets = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_deferred_dropped_rowsets"),
"(AccessMethods.DeferredDroppedrowsets)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsDroppedrowsetcleanups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_dropped_rowset_cleanups"),
"(AccessMethods.Droppedrowsetcleanups)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsDroppedrowsetsskipped = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_dropped_rowset_skips"),
"(AccessMethods.Droppedrowsetsskipped)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsExtentDeallocations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_extent_deallocations"),
"(AccessMethods.ExtentDeallocations)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsExtentsAllocated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_extent_allocations"),
"(AccessMethods.ExtentsAllocated)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFailedAUcleanupbatches = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_au_batch_cleanup_failures"),
"(AccessMethods.FailedAUcleanupbatches)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFailedleafpagecookie = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_leaf_page_cookie_failures"),
"(AccessMethods.Failedleafpagecookie)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFailedtreepagecookie = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_tree_page_cookie_failures"),
"(AccessMethods.Failedtreepagecookie)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsForwardedRecords = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_forwarded_records"),
"(AccessMethods.ForwardedRecords)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFreeSpacePageFetches = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_free_space_page_fetches"),
"(AccessMethods.FreeSpacePageFetches)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFreeSpaceScans = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_free_space_scans"),
"(AccessMethods.FreeSpaceScans)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsFullScans = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_full_scans"),
"(AccessMethods.FullScans)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsIndexSearches = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_index_searches"),
"(AccessMethods.IndexSearches)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsInSysXactwaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_insysxact_waits"),
"(AccessMethods.InSysXactwaits)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsLobHandleCreateCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_handle_creates"),
"(AccessMethods.LobHandleCreateCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsLobHandleDestroyCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_handle_destroys"),
"(AccessMethods.LobHandleDestroyCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsLobSSProviderCreateCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_ss_provider_creates"),
"(AccessMethods.LobSSProviderCreateCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsLobSSProviderDestroyCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_ss_provider_destroys"),
"(AccessMethods.LobSSProviderDestroyCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsLobSSProviderTruncationCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_lob_ss_provider_truncations"),
"(AccessMethods.LobSSProviderTruncationCount)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsMixedPageAllocations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_mixed_page_allocations"),
"(AccessMethods.MixedpageallocationsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsPageCompressionAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_page_compression_attempts"),
"(AccessMethods.PagecompressionattemptsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsPageDeallocations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_page_deallocations"),
"(AccessMethods.PageDeallocationsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsPagesAllocated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_page_allocations"),
"(AccessMethods.PagesAllocatedPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsPagesCompressed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_page_compressions"),
"(AccessMethods.PagescompressedPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsPageSplits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_page_splits"),
"(AccessMethods.PageSplitsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsProbeScans = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_probe_scans"),
"(AccessMethods.ProbeScansPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsRangeScans = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_range_scans"),
"(AccessMethods.RangeScansPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsScanPointRevalidations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_scan_point_revalidations"),
"(AccessMethods.ScanPointRevalidationsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsSkippedGhostedRecords = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_ghost_record_skips"),
"(AccessMethods.SkippedGhostedRecordsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsTableLockEscalations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_table_lock_escalations"),
"(AccessMethods.TableLockEscalationsPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsUsedleafpagecookie = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_leaf_page_cookie_uses"),
"(AccessMethods.Usedleafpagecookie)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsUsedtreepagecookie = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_tree_page_cookie_uses"),
"(AccessMethods.Usedtreepagecookie)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsWorkfilesCreated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_workfile_creates"),
"(AccessMethods.WorkfilesCreatedPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsWorktablesCreated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_worktables_creates"),
"(AccessMethods.WorktablesCreatedPersec)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsWorktablesFromCacheHits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_worktables_from_cache_hits"),
"(AccessMethods.WorktablesFromCacheRatio)",
[]string{"mssql_instance"},
nil,
)
c.accessMethodsWorktablesFromCacheLookups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "accessmethods_worktables_from_cache_lookups"),
"(AccessMethods.WorktablesFromCacheRatio_Base)",
[]string{"mssql_instance"},
nil,
)
return nil
}
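The ratio counter and its base counter are exported as separate hits and lookups series so the cache-hit ratio can be derived at query time instead of shipping a precomputed percentage. A minimal sketch of that derivation:

package sketch

// worktableCacheHitRatio recomputes the "Worktables From Cache Ratio" from the
// two raw counters the collector exports; guarding against a zero base mirrors
// what a query-time division of the two series has to do anyway.
func worktableCacheHitRatio(hits, lookups float64) float64 {
	if lookups == 0 {
		return 0
	}

	return hits / lookups
}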
func (c *Collector) collectAccessMethods(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAccessMethods, c.accessMethodsPerfDataCollectors, c.collectAccessMethodsInstance)
}
func (c *Collector) collectAccessMethodsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"), err)
}
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "AccessMethods"))
}
ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanupbatches,
prometheus.CounterValue,
data[accessMethodsAUCleanupbatchesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsAUcleanups,
prometheus.CounterValue,
data[accessMethodsAUCleanupsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobCreateCount,
prometheus.CounterValue,
data[accessMethodsByReferenceLobCreateCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsByReferenceLobUseCount,
prometheus.CounterValue,
data[accessMethodsByReferenceLobUseCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountLobReadahead,
prometheus.CounterValue,
data[accessMethodsCountLobReadahead].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPullInRow,
prometheus.CounterValue,
data[accessMethodsCountPullInRow].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsCountPushOffRow,
prometheus.CounterValue,
data[accessMethodsCountPushOffRow].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferreddroppedAUs,
prometheus.GaugeValue,
data[accessMethodsDeferredDroppedAUs].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDeferredDroppedrowsets,
prometheus.GaugeValue,
data[accessMethodsDeferredDroppedRowsets].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetcleanups,
prometheus.CounterValue,
data[accessMethodsDroppedRowsetCleanupsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsDroppedrowsetsskipped,
prometheus.CounterValue,
data[accessMethodsDroppedRowsetsSkippedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentDeallocations,
prometheus.CounterValue,
data[accessMethodsExtentDeallocationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsExtentsAllocated,
prometheus.CounterValue,
data[accessMethodsExtentsAllocatedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedAUcleanupbatches,
prometheus.CounterValue,
data[accessMethodsFailedAUCleanupBatchesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedleafpagecookie,
prometheus.CounterValue,
data[accessMethodsFailedLeafPageCookie].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFailedtreepagecookie,
prometheus.CounterValue,
data[accessMethodsFailedTreePageCookie].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsForwardedRecords,
prometheus.CounterValue,
data[accessMethodsForwardedRecordsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpacePageFetches,
prometheus.CounterValue,
data[accessMethodsFreeSpacePageFetchesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFreeSpaceScans,
prometheus.CounterValue,
data[accessMethodsFreeSpaceScansPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsFullScans,
prometheus.CounterValue,
data[accessMethodsFullScansPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsIndexSearches,
prometheus.CounterValue,
data[accessMethodsIndexSearchesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsInSysXactwaits,
prometheus.CounterValue,
data[accessMethodsInSysXactWaitsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleCreateCount,
prometheus.CounterValue,
data[accessMethodsLobHandleCreateCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobHandleDestroyCount,
prometheus.CounterValue,
data[accessMethodsLobHandleDestroyCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderCreateCount,
prometheus.CounterValue,
data[accessMethodsLobSSProviderCreateCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderDestroyCount,
prometheus.CounterValue,
data[accessMethodsLobSSProviderDestroyCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsLobSSProviderTruncationCount,
prometheus.CounterValue,
data[accessMethodsLobSSProviderTruncationCount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsMixedPageAllocations,
prometheus.CounterValue,
data[accessMethodsMixedPageAllocationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageCompressionAttempts,
prometheus.CounterValue,
data[accessMethodsPageCompressionAttemptsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageDeallocations,
prometheus.CounterValue,
data[accessMethodsPageDeallocationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesAllocated,
prometheus.CounterValue,
data[accessMethodsPagesAllocatedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPagesCompressed,
prometheus.CounterValue,
data[accessMethodsPagesCompressedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsPageSplits,
prometheus.CounterValue,
data[accessMethodsPageSplitsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsProbeScans,
prometheus.CounterValue,
data[accessMethodsProbeScansPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsRangeScans,
prometheus.CounterValue,
data[accessMethodsRangeScansPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsScanPointRevalidations,
prometheus.CounterValue,
data[accessMethodsScanPointRevalidationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsSkippedGhostedRecords,
prometheus.CounterValue,
data[accessMethodsSkippedGhostedRecordsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsTableLockEscalations,
prometheus.CounterValue,
data[accessMethodsTableLockEscalationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedleafpagecookie,
prometheus.CounterValue,
data[accessMethodsUsedLeafPageCookie].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsUsedtreepagecookie,
prometheus.CounterValue,
data[accessMethodsUsedTreePageCookie].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorkfilesCreated,
prometheus.CounterValue,
data[accessMethodsWorkfilesCreatedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesCreated,
prometheus.CounterValue,
data[accessMethodsWorktablesCreatedPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheHits,
prometheus.CounterValue,
data[accessMethodsWorktablesFromCacheRatio].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.accessMethodsWorktablesFromCacheLookups,
prometheus.CounterValue,
data[accessMethodsWorktablesFromCacheRatioBase].FirstValue,
sqlInstance,
)
return nil
}
func (c *Collector) closeAccessMethods() {
for _, perfDataCollector := range c.accessMethodsPerfDataCollectors {
perfDataCollector.Close()
}
}
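Taken together, the access-methods code above shows the pattern every MSSQL sub-collector in this commit follows: build one perfdata.Collector per SQL instance plus the Prometheus descriptors, collect by reading the counters and emitting const metrics, and close the handles on shutdown. The sketch below compresses that pattern into a single standalone prometheus.Collector for one counter. It is illustrative only: the object name, counter name, metric name, and port are placeholders, the perfdata signatures are inferred from the code above, and because internal/perfdata is an internal package the file would have to live inside the windows_exporter module tree.

//go:build windows

package main

import (
	"log"
	"net/http"

	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// demoCollector exposes a single perf counter using the same
// build/collect/close pattern as the mssql sub-collectors above.
type demoCollector struct {
	perfDataCollector *perfdata.Collector
	fullScans         *prometheus.Desc
}

func newDemoCollector() (*demoCollector, error) {
	// Placeholder object/counter for the default SQL Server instance; named
	// instances would use the "MSSQL$<name>:" prefix instead.
	pdc, err := perfdata.NewCollector("SQLServer:Access Methods", nil, []string{"Full Scans/sec"})
	if err != nil {
		return nil, err
	}

	return &demoCollector{
		perfDataCollector: pdc,
		fullScans: prometheus.NewDesc(
			"demo_mssql_accessmethods_full_scans_total",
			"Unrestricted full scans (demo metric, not part of this commit).",
			nil, nil,
		),
	}, nil
}

func (c *demoCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.fullScans }

func (c *demoCollector) Collect(ch chan<- prometheus.Metric) {
	data, err := c.perfDataCollector.Collect()
	if err != nil {
		return // a real collector would log or export the scrape failure
	}

	for _, counters := range data {
		ch <- prometheus.MustNewConstMetric(c.fullScans, prometheus.CounterValue, counters["Full Scans/sec"].FirstValue)
	}
}

func main() {
	collector, err := newDemoCollector()
	if err != nil {
		log.Fatal(err)
	}
	defer collector.perfDataCollector.Close()

	prometheus.MustRegister(collector)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9999", nil))
}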

View File

@ -0,0 +1,204 @@
//go:build windows

package mssql

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus-community/windows_exporter/internal/utils"
	"github.com/prometheus/client_golang/prometheus"
)
type collectorAvailabilityReplica struct {
availabilityReplicaPerfDataCollectors map[string]*perfdata.Collector
availReplicaBytesReceivedFromReplica *prometheus.Desc
availReplicaBytesSentToReplica *prometheus.Desc
availReplicaBytesSentToTransport *prometheus.Desc
availReplicaFlowControl *prometheus.Desc
availReplicaFlowControlTimeMS *prometheus.Desc
availReplicaReceivesFromReplica *prometheus.Desc
availReplicaResentMessages *prometheus.Desc
availReplicaSendsToReplica *prometheus.Desc
availReplicaSendsToTransport *prometheus.Desc
}
const (
availReplicaBytesReceivedFromReplicaPerSec = "Bytes Received from Replica/sec"
availReplicaBytesSentToReplicaPerSec = "Bytes Sent to Replica/sec"
availReplicaBytesSentToTransportPerSec = "Bytes Sent to Transport/sec"
availReplicaFlowControlPerSec = "Flow Control/sec"
availReplicaFlowControlTimeMSPerSec = "Flow Control Time (ms/sec)"
availReplicaReceivesFromReplicaPerSec = "Receives from Replica/sec"
availReplicaResentMessagesPerSec = "Resent Messages/sec"
availReplicaSendsToReplicaPerSec = "Sends to Replica/sec"
availReplicaSendsToTransportPerSec = "Sends to Transport/sec"
)
func (c *Collector) buildAvailabilityReplica() error {
var err error
c.availabilityReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
availReplicaBytesReceivedFromReplicaPerSec,
availReplicaBytesSentToReplicaPerSec,
availReplicaBytesSentToTransportPerSec,
availReplicaFlowControlPerSec,
availReplicaFlowControlTimeMSPerSec,
availReplicaReceivesFromReplicaPerSec,
availReplicaResentMessagesPerSec,
availReplicaSendsToReplicaPerSec,
availReplicaSendsToTransportPerSec,
}
for sqlInstance := range c.mssqlInstances {
c.availabilityReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance, err)
}
}
// Win32_PerfRawData_{instance}_SQLServerAvailabilityReplica
c.availReplicaBytesReceivedFromReplica = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_received_from_replica_bytes"),
"(AvailabilityReplica.BytesReceivedfromReplica)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaBytesSentToReplica = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_sent_to_replica_bytes"),
"(AvailabilityReplica.BytesSenttoReplica)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaBytesSentToTransport = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_sent_to_transport_bytes"),
"(AvailabilityReplica.BytesSenttoTransport)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaFlowControl = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_initiated_flow_controls"),
"(AvailabilityReplica.FlowControl)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaFlowControlTimeMS = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_flow_control_wait_seconds"),
"(AvailabilityReplica.FlowControlTimems)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaReceivesFromReplica = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_receives_from_replica"),
"(AvailabilityReplica.ReceivesfromReplica)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaResentMessages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_resent_messages"),
"(AvailabilityReplica.ResentMessages)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaSendsToReplica = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_sends_to_replica"),
"(AvailabilityReplica.SendstoReplica)",
[]string{"mssql_instance", "replica"},
nil,
)
c.availReplicaSendsToTransport = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "availreplica_sends_to_transport"),
"(AvailabilityReplica.SendstoTransport)",
[]string{"mssql_instance", "replica"},
nil,
)
return nil
}
func (c *Collector) collectAvailabilityReplica(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorAvailabilityReplica, c.availabilityReplicaPerfDataCollectors, c.collectAvailabilityReplicaInstance)
}
func (c *Collector) collectAvailabilityReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), err)
}
for replicaName, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesReceivedFromReplica,
prometheus.CounterValue,
data[availReplicaBytesReceivedFromReplicaPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToReplica,
prometheus.CounterValue,
data[availReplicaBytesSentToReplicaPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaBytesSentToTransport,
prometheus.CounterValue,
data[availReplicaBytesSentToTransportPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControl,
prometheus.CounterValue,
data[availReplicaFlowControlPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaFlowControlTimeMS,
prometheus.CounterValue,
utils.MilliSecToSec(data[availReplicaFlowControlTimeMSPerSec].FirstValue),
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaReceivesFromReplica,
prometheus.CounterValue,
data[availReplicaReceivesFromReplicaPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaResentMessages,
prometheus.CounterValue,
data[availReplicaResentMessagesPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToReplica,
prometheus.CounterValue,
data[availReplicaSendsToReplicaPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.availReplicaSendsToTransport,
prometheus.CounterValue,
data[availReplicaSendsToTransportPerSec].FirstValue,
sqlInstance, replicaName,
)
}
return nil
}
func (c *Collector) closeAvailabilityReplica() {
for _, perfDataCollector := range c.availabilityReplicaPerfDataCollectors {
perfDataCollector.Close()
}
}
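One unit-handling detail above is easy to miss: "Flow Control Time (ms/sec)" is reported by Windows in milliseconds, but the metric is named availreplica_flow_control_wait_seconds, so the value goes through utils.MilliSecToSec before being emitted. That helper is defined outside this excerpt; a minimal sketch, assuming it is a plain millisecond-to-second division:

// MilliSecToSec converts a millisecond counter value to seconds, matching the
// Prometheus convention of exposing durations in base units. Assumed
// reconstruction; see internal/utils for the real implementation.
func MilliSecToSec(ms float64) float64 {
	return ms / 1000
}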

View File

@ -0,0 +1,426 @@
//go:build windows

package mssql

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus/client_golang/prometheus"
)
type collectorBufferManager struct {
bufManPerfDataCollectors map[string]*perfdata.Collector
bufManBackgroundwriterpages *prometheus.Desc
bufManBuffercachehits *prometheus.Desc
bufManBuffercachelookups *prometheus.Desc
bufManCheckpointpages *prometheus.Desc
bufManDatabasepages *prometheus.Desc
bufManExtensionallocatedpages *prometheus.Desc
bufManExtensionfreepages *prometheus.Desc
bufManExtensioninuseaspercentage *prometheus.Desc
bufManExtensionoutstandingIOcounter *prometheus.Desc
bufManExtensionpageevictions *prometheus.Desc
bufManExtensionpagereads *prometheus.Desc
bufManExtensionpageunreferencedtime *prometheus.Desc
bufManExtensionpagewrites *prometheus.Desc
bufManFreeliststalls *prometheus.Desc
bufManIntegralControllerSlope *prometheus.Desc
bufManLazywrites *prometheus.Desc
bufManPagelifeexpectancy *prometheus.Desc
bufManPagelookups *prometheus.Desc
bufManPagereads *prometheus.Desc
bufManPagewrites *prometheus.Desc
bufManReadaheadpages *prometheus.Desc
bufManReadaheadtime *prometheus.Desc
bufManTargetpages *prometheus.Desc
}
const (
bufManBackgroundWriterPagesPerSec = "Background writer pages/sec"
bufManBufferCacheHitRatio = "Buffer cache hit ratio"
bufManBufferCacheHitRatioBase = "Buffer cache hit ratio base"
bufManCheckpointPagesPerSec = "Checkpoint pages/sec"
bufManDatabasePages = "Database pages"
bufManExtensionAllocatedPages = "Extension allocated pages"
bufManExtensionFreePages = "Extension free pages"
bufManExtensionInUseAsPercentage = "Extension in use as percentage"
bufManExtensionOutstandingIOCounter = "Extension outstanding IO counter"
bufManExtensionPageEvictionsPerSec = "Extension page evictions/sec"
bufManExtensionPageReadsPerSec = "Extension page reads/sec"
bufManExtensionPageUnreferencedTime = "Extension page unreferenced time"
bufManExtensionPageWritesPerSec = "Extension page writes/sec"
bufManFreeListStallsPerSec = "Free list stalls/sec"
bufManIntegralControllerSlope = "Integral Controller Slope"
bufManLazyWritesPerSec = "Lazy writes/sec"
bufManPageLifeExpectancy = "Page life expectancy"
bufManPageLookupsPerSec = "Page lookups/sec"
bufManPageReadsPerSec = "Page reads/sec"
bufManPageWritesPerSec = "Page writes/sec"
bufManReadaheadPagesPerSec = "Readahead pages/sec"
bufManReadaheadTimePerSec = "Readahead time/sec"
bufManTargetPages = "Target pages"
)
func (c *Collector) buildBufferManager() error {
var err error
c.bufManPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
bufManBackgroundWriterPagesPerSec,
bufManBufferCacheHitRatio,
bufManBufferCacheHitRatioBase,
bufManCheckpointPagesPerSec,
bufManDatabasePages,
bufManExtensionAllocatedPages,
bufManExtensionFreePages,
bufManExtensionInUseAsPercentage,
bufManExtensionOutstandingIOCounter,
bufManExtensionPageEvictionsPerSec,
bufManExtensionPageReadsPerSec,
bufManExtensionPageUnreferencedTime,
bufManExtensionPageWritesPerSec,
bufManFreeListStallsPerSec,
bufManIntegralControllerSlope,
bufManLazyWritesPerSec,
bufManPageLifeExpectancy,
bufManPageLookupsPerSec,
bufManPageReadsPerSec,
bufManPageWritesPerSec,
bufManReadaheadPagesPerSec,
bufManReadaheadTimePerSec,
bufManTargetPages,
}
for sqlInstance := range c.mssqlInstances {
c.bufManPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance, err)
}
}
c.bufManBackgroundwriterpages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_background_writer_pages"),
"(BufferManager.Backgroundwriterpages)",
[]string{"mssql_instance"},
nil,
)
c.bufManBuffercachehits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_buffer_cache_hits"),
"(BufferManager.Buffercachehitratio)",
[]string{"mssql_instance"},
nil,
)
c.bufManBuffercachelookups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_buffer_cache_lookups"),
"(BufferManager.Buffercachehitratio_Base)",
[]string{"mssql_instance"},
nil,
)
c.bufManCheckpointpages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_checkpoint_pages"),
"(BufferManager.Checkpointpages)",
[]string{"mssql_instance"},
nil,
)
c.bufManDatabasepages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_database_pages"),
"(BufferManager.Databasepages)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionallocatedpages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_allocated_pages"),
"(BufferManager.Extensionallocatedpages)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionfreepages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_free_pages"),
"(BufferManager.Extensionfreepages)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensioninuseaspercentage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_in_use_as_percentage"),
"(BufferManager.Extensioninuseaspercentage)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionoutstandingIOcounter = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_outstanding_io"),
"(BufferManager.ExtensionoutstandingIOcounter)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionpageevictions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_page_evictions"),
"(BufferManager.Extensionpageevictions)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionpagereads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_page_reads"),
"(BufferManager.Extensionpagereads)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionpageunreferencedtime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_page_unreferenced_seconds"),
"(BufferManager.Extensionpageunreferencedtime)",
[]string{"mssql_instance"},
nil,
)
c.bufManExtensionpagewrites = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_extension_page_writes"),
"(BufferManager.Extensionpagewrites)",
[]string{"mssql_instance"},
nil,
)
c.bufManFreeliststalls = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_free_list_stalls"),
"(BufferManager.Freeliststalls)",
[]string{"mssql_instance"},
nil,
)
c.bufManIntegralControllerSlope = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_integral_controller_slope"),
"(BufferManager.IntegralControllerSlope)",
[]string{"mssql_instance"},
nil,
)
c.bufManLazywrites = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_lazywrites"),
"(BufferManager.Lazywrites)",
[]string{"mssql_instance"},
nil,
)
c.bufManPagelifeexpectancy = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_page_life_expectancy_seconds"),
"(BufferManager.Pagelifeexpectancy)",
[]string{"mssql_instance"},
nil,
)
c.bufManPagelookups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_page_lookups"),
"(BufferManager.Pagelookups)",
[]string{"mssql_instance"},
nil,
)
c.bufManPagereads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_page_reads"),
"(BufferManager.Pagereads)",
[]string{"mssql_instance"},
nil,
)
c.bufManPagewrites = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_page_writes"),
"(BufferManager.Pagewrites)",
[]string{"mssql_instance"},
nil,
)
c.bufManReadaheadpages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_read_ahead_pages"),
"(BufferManager.Readaheadpages)",
[]string{"mssql_instance"},
nil,
)
c.bufManReadaheadtime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_read_ahead_issuing_seconds"),
"(BufferManager.Readaheadtime)",
[]string{"mssql_instance"},
nil,
)
c.bufManTargetpages = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bufman_target_pages"),
"(BufferManager.Targetpages)",
[]string{"mssql_instance"},
nil,
)
return nil
}
func (c *Collector) collectBufferManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorBufferManager, c.bufManPerfDataCollectors, c.collectBufferManagerInstance)
}
func (c *Collector) collectBufferManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), err)
}
for _, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.bufManBackgroundwriterpages,
prometheus.CounterValue,
data[bufManBackgroundWriterPagesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachehits,
prometheus.GaugeValue,
data[bufManBufferCacheHitRatio].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManBuffercachelookups,
prometheus.GaugeValue,
data[bufManBufferCacheHitRatioBase].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManCheckpointpages,
prometheus.CounterValue,
data[bufManCheckpointPagesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManDatabasepages,
prometheus.GaugeValue,
data[bufManDatabasePages].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionallocatedpages,
prometheus.GaugeValue,
data[bufManExtensionAllocatedPages].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionfreepages,
prometheus.GaugeValue,
data[bufManExtensionFreePages].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensioninuseaspercentage,
prometheus.GaugeValue,
data[bufManExtensionInUseAsPercentage].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionoutstandingIOcounter,
prometheus.GaugeValue,
data[bufManExtensionOutstandingIOCounter].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageevictions,
prometheus.CounterValue,
data[bufManExtensionPageEvictionsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagereads,
prometheus.CounterValue,
data[bufManExtensionPageReadsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpageunreferencedtime,
prometheus.GaugeValue,
data[bufManExtensionPageUnreferencedTime].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManExtensionpagewrites,
prometheus.CounterValue,
data[bufManExtensionPageWritesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManFreeliststalls,
prometheus.CounterValue,
data[bufManFreeListStallsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManIntegralControllerSlope,
prometheus.GaugeValue,
data[bufManIntegralControllerSlope].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManLazywrites,
prometheus.CounterValue,
data[bufManLazyWritesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelifeexpectancy,
prometheus.GaugeValue,
data[bufManPageLifeExpectancy].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagelookups,
prometheus.CounterValue,
data[bufManPageLookupsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagereads,
prometheus.CounterValue,
data[bufManPageReadsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManPagewrites,
prometheus.CounterValue,
data[bufManPageWritesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadpages,
prometheus.CounterValue,
data[bufManReadaheadPagesPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManReadaheadtime,
prometheus.CounterValue,
data[bufManReadaheadTimePerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.bufManTargetpages,
prometheus.GaugeValue,
data[bufManTargetPages].FirstValue,
sqlInstance,
)
}
return nil
}
func (c *Collector) closeBufferManager() {
for _, perfDataCollectors := range c.bufManPerfDataCollectors {
perfDataCollectors.Close()
}
}
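Every build function in these files derives the performance object name through c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), which is defined elsewhere in the package. Its job follows from how SQL Server registers its counters: the default instance publishes objects as "SQLServer:<object>", while a named instance uses "MSSQL$<instance>:<object>". A hedged sketch of that mapping within the mssql package (the function name and the default-instance check are assumptions, not taken from this diff):

// getPerfObjectName maps a SQL Server instance and counter object to the
// Windows performance object name, e.g. "SQLServer:Buffer Manager" for the
// default instance and "MSSQL$PROD:Buffer Manager" for a named instance
// called PROD. Sketch only; the exporter's real helper may differ in detail.
func getPerfObjectName(sqlInstance, object string) string {
	if sqlInstance == "MSSQLSERVER" { // conventional service name of the default instance
		return "SQLServer:" + object
	}

	return "MSSQL$" + sqlInstance + ":" + object
}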

View File

@ -0,0 +1,826 @@
//go:build windows

package mssql

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus/client_golang/prometheus"
)
type collectorDatabases struct {
databasesPerfDataCollectors map[string]*perfdata.Collector
databasesActiveParallelRedoThreads *prometheus.Desc
databasesActiveTransactions *prometheus.Desc
databasesBackupPerRestoreThroughput *prometheus.Desc
databasesBulkCopyRows *prometheus.Desc
databasesBulkCopyThroughput *prometheus.Desc
databasesCommitTableEntries *prometheus.Desc
databasesDataFilesSizeKB *prometheus.Desc
databasesDBCCLogicalScanBytes *prometheus.Desc
databasesGroupCommitTime *prometheus.Desc
databasesLogBytesFlushed *prometheus.Desc
databasesLogCacheHits *prometheus.Desc
databasesLogCacheLookups *prometheus.Desc
databasesLogCacheReads *prometheus.Desc
databasesLogFilesSizeKB *prometheus.Desc
databasesLogFilesUsedSizeKB *prometheus.Desc
databasesLogFlushes *prometheus.Desc
databasesLogFlushWaits *prometheus.Desc
databasesLogFlushWaitTime *prometheus.Desc
databasesLogFlushWriteTimeMS *prometheus.Desc
databasesLogGrowths *prometheus.Desc
databasesLogPoolCacheMisses *prometheus.Desc
databasesLogPoolDiskReads *prometheus.Desc
databasesLogPoolHashDeletes *prometheus.Desc
databasesLogPoolHashInserts *prometheus.Desc
databasesLogPoolInvalidHashEntry *prometheus.Desc
databasesLogPoolLogScanPushes *prometheus.Desc
databasesLogPoolLogWriterPushes *prometheus.Desc
databasesLogPoolPushEmptyFreePool *prometheus.Desc
databasesLogPoolPushLowMemory *prometheus.Desc
databasesLogPoolPushNoFreeBuffer *prometheus.Desc
databasesLogPoolReqBehindTrunc *prometheus.Desc
databasesLogPoolRequestsOldVLF *prometheus.Desc
databasesLogPoolRequests *prometheus.Desc
databasesLogPoolTotalActiveLogSize *prometheus.Desc
databasesLogPoolTotalSharedPoolSize *prometheus.Desc
databasesLogShrinks *prometheus.Desc
databasesLogTruncations *prometheus.Desc
databasesPercentLogUsed *prometheus.Desc
databasesReplPendingXacts *prometheus.Desc
databasesReplTransRate *prometheus.Desc
databasesShrinkDataMovementBytes *prometheus.Desc
databasesTrackedTransactions *prometheus.Desc
databasesTransactions *prometheus.Desc
databasesWriteTransactions *prometheus.Desc
databasesXTPControllerDLCLatencyPerFetch *prometheus.Desc
databasesXTPControllerDLCPeakLatency *prometheus.Desc
databasesXTPControllerLogProcessed *prometheus.Desc
databasesXTPMemoryUsedKB *prometheus.Desc
}
const (
databasesActiveParallelRedoThreads = "Active parallel redo threads"
databasesActiveTransactions = "Active Transactions"
databasesBackupPerRestoreThroughputPerSec = "Backup/Restore Throughput/sec"
databasesBulkCopyRowsPerSec = "Bulk Copy Rows/sec"
databasesBulkCopyThroughputPerSec = "Bulk Copy Throughput/sec"
databasesCommitTableEntries = "Commit table entries"
databasesDataFilesSizeKB = "Data File(s) Size (KB)"
databasesDBCCLogicalScanBytesPerSec = "DBCC Logical Scan Bytes/sec"
databasesGroupCommitTimePerSec = "Group Commit Time/sec"
databasesLogBytesFlushedPerSec = "Log Bytes Flushed/sec"
databasesLogCacheHitRatio = "Log Cache Hit Ratio"
databasesLogCacheHitRatioBase = "Log Cache Hit Ratio Base"
databasesLogCacheReadsPerSec = "Log Cache Reads/sec"
databasesLogFilesSizeKB = "Log File(s) Size (KB)"
databasesLogFilesUsedSizeKB = "Log File(s) Used Size (KB)"
databasesLogFlushesPerSec = "Log Flushes/sec"
databasesLogFlushWaitsPerSec = "Log Flush Waits/sec"
databasesLogFlushWaitTime = "Log Flush Wait Time"
databasesLogFlushWriteTimeMS = "Log Flush Write Time (ms)"
databasesLogGrowths = "Log Growths"
databasesLogPoolCacheMissesPerSec = "Log Pool Cache Misses/sec"
databasesLogPoolDiskReadsPerSec = "Log Pool Disk Reads/sec"
databasesLogPoolHashDeletesPerSec = "Log Pool Hash Deletes/sec"
databasesLogPoolHashInsertsPerSec = "Log Pool Hash Inserts/sec"
databasesLogPoolInvalidHashEntryPerSec = "Log Pool Invalid Hash Entry/sec"
databasesLogPoolLogScanPushesPerSec = "Log Pool Log Scan Pushes/sec"
databasesLogPoolLogWriterPushesPerSec = "Log Pool LogWriter Pushes/sec"
databasesLogPoolPushEmptyFreePoolPerSec = "Log Pool Push Empty FreePool/sec"
databasesLogPoolPushLowMemoryPerSec = "Log Pool Push Low Memory/sec"
databasesLogPoolPushNoFreeBufferPerSec = "Log Pool Push No Free Buffer/sec"
databasesLogPoolReqBehindTruncPerSec = "Log Pool Req. Behind Trunc/sec"
databasesLogPoolRequestsOldVLFPerSec = "Log Pool Requests Old VLF/sec"
databasesLogPoolRequestsPerSec = "Log Pool Requests/sec"
databasesLogPoolTotalActiveLogSize = "Log Pool Total Active Log Size"
databasesLogPoolTotalSharedPoolSize = "Log Pool Total Shared Pool Size"
databasesLogShrinks = "Log Shrinks"
databasesLogTruncations = "Log Truncations"
databasesPercentLogUsed = "Percent Log Used"
databasesReplPendingXacts = "Repl. Pending Xacts"
databasesReplTransRate = "Repl. Trans. Rate"
databasesShrinkDataMovementBytesPerSec = "Shrink Data Movement Bytes/sec"
databasesTrackedTransactionsPerSec = "Tracked transactions/sec"
databasesTransactionsPerSec = "Transactions/sec"
databasesWriteTransactionsPerSec = "Write Transactions/sec"
databasesXTPControllerDLCLatencyPerFetch = "XTP Controller DLC Latency/Fetch"
databasesXTPControllerDLCPeakLatency = "XTP Controller DLC Peak Latency"
databasesXTPControllerLogProcessedPerSec = "XTP Controller Log Processed/sec"
databasesXTPMemoryUsedKB = "XTP Memory Used (KB)"
)
func (c *Collector) buildDatabases() error {
var err error
c.databasesPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
databasesActiveParallelRedoThreads,
databasesActiveTransactions,
databasesBackupPerRestoreThroughputPerSec,
databasesBulkCopyRowsPerSec,
databasesBulkCopyThroughputPerSec,
databasesCommitTableEntries,
databasesDataFilesSizeKB,
databasesDBCCLogicalScanBytesPerSec,
databasesGroupCommitTimePerSec,
databasesLogBytesFlushedPerSec,
databasesLogCacheHitRatio,
databasesLogCacheHitRatioBase,
databasesLogCacheReadsPerSec,
databasesLogFilesSizeKB,
databasesLogFilesUsedSizeKB,
databasesLogFlushesPerSec,
databasesLogFlushWaitsPerSec,
databasesLogFlushWaitTime,
databasesLogFlushWriteTimeMS,
databasesLogGrowths,
databasesLogPoolCacheMissesPerSec,
databasesLogPoolDiskReadsPerSec,
databasesLogPoolHashDeletesPerSec,
databasesLogPoolHashInsertsPerSec,
databasesLogPoolInvalidHashEntryPerSec,
databasesLogPoolLogScanPushesPerSec,
databasesLogPoolLogWriterPushesPerSec,
databasesLogPoolPushEmptyFreePoolPerSec,
databasesLogPoolPushLowMemoryPerSec,
databasesLogPoolPushNoFreeBufferPerSec,
databasesLogPoolReqBehindTruncPerSec,
databasesLogPoolRequestsOldVLFPerSec,
databasesLogPoolRequestsPerSec,
databasesLogPoolTotalActiveLogSize,
databasesLogPoolTotalSharedPoolSize,
databasesLogShrinks,
databasesLogTruncations,
databasesPercentLogUsed,
databasesReplPendingXacts,
databasesReplTransRate,
databasesShrinkDataMovementBytesPerSec,
databasesTrackedTransactionsPerSec,
databasesTransactionsPerSec,
databasesWriteTransactionsPerSec,
databasesXTPControllerDLCLatencyPerFetch,
databasesXTPControllerDLCPeakLatency,
databasesXTPControllerLogProcessedPerSec,
databasesXTPMemoryUsedKB,
}
for sqlInstance := range c.mssqlInstances {
c.databasesPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Databases"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance, err)
}
}
c.databasesActiveParallelRedoThreads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_active_parallel_redo_threads"),
"(Databases.ActiveParallelredothreads)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesActiveTransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_active_transactions"),
"(Databases.ActiveTransactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesBackupPerRestoreThroughput = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_backup_restore_operations"),
"(Databases.BackupPerRestoreThroughput)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesBulkCopyRows = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_bulk_copy_rows"),
"(Databases.BulkCopyRows)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesBulkCopyThroughput = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_bulk_copy_bytes"),
"(Databases.BulkCopyThroughput)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesCommitTableEntries = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_commit_table_entries"),
"(Databases.Committableentries)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesDataFilesSizeKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_data_files_size_bytes"),
"(Databases.DataFilesSizeKB)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesDBCCLogicalScanBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_dbcc_logical_scan_bytes"),
"(Databases.DBCCLogicalScanBytes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesGroupCommitTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_group_commit_stall_seconds"),
"(Databases.GroupCommitTime)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogBytesFlushed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_flushed_bytes"),
"(Databases.LogBytesFlushed)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogCacheHits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_cache_hits"),
"(Databases.LogCacheHitRatio)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogCacheLookups = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_cache_lookups"),
"(Databases.LogCacheHitRatio_Base)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogCacheReads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_cache_reads"),
"(Databases.LogCacheReads)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFilesSizeKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_files_size_bytes"),
"(Databases.LogFilesSizeKB)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFilesUsedSizeKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_files_used_size_bytes"),
"(Databases.LogFilesUsedSizeKB)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFlushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_flushes"),
"(Databases.LogFlushes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFlushWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_flush_waits"),
"(Databases.LogFlushWaits)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFlushWaitTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_flush_wait_seconds"),
"(Databases.LogFlushWaitTime)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogFlushWriteTimeMS = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_flush_write_seconds"),
"(Databases.LogFlushWriteTimems)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogGrowths = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_growths"),
"(Databases.LogGrowths)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolCacheMisses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_cache_misses"),
"(Databases.LogPoolCacheMisses)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolDiskReads = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_disk_reads"),
"(Databases.LogPoolDiskReads)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolHashDeletes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_hash_deletes"),
"(Databases.LogPoolHashDeletes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolHashInserts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_hash_inserts"),
"(Databases.LogPoolHashInserts)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolInvalidHashEntry = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_invalid_hash_entries"),
"(Databases.LogPoolInvalidHashEntry)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolLogScanPushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_log_scan_pushes"),
"(Databases.LogPoolLogScanPushes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolLogWriterPushes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_log_writer_pushes"),
"(Databases.LogPoolLogWriterPushes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolPushEmptyFreePool = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_empty_free_pool_pushes"),
"(Databases.LogPoolPushEmptyFreePool)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolPushLowMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_low_memory_pushes"),
"(Databases.LogPoolPushLowMemory)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolPushNoFreeBuffer = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_no_free_buffer_pushes"),
"(Databases.LogPoolPushNoFreeBuffer)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolReqBehindTrunc = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_req_behind_trunc"),
"(Databases.LogPoolReqBehindTrunc)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolRequestsOldVLF = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_requests_old_vlf"),
"(Databases.LogPoolRequestsOldVLF)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_requests"),
"(Databases.LogPoolRequests)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolTotalActiveLogSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_total_active_log_bytes"),
"(Databases.LogPoolTotalActiveLogSize)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogPoolTotalSharedPoolSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_pool_total_shared_pool_bytes"),
"(Databases.LogPoolTotalSharedPoolSize)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogShrinks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_shrinks"),
"(Databases.LogShrinks)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesLogTruncations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_truncations"),
"(Databases.LogTruncations)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesPercentLogUsed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_log_used_percent"),
"(Databases.PercentLogUsed)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesReplPendingXacts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_pending_repl_transactions"),
"(Databases.ReplPendingTransactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesReplTransRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_repl_transactions"),
"(Databases.ReplTranactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesShrinkDataMovementBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_shrink_data_movement_bytes"),
"(Databases.ShrinkDataMovementBytes)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesTrackedTransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_tracked_transactions"),
"(Databases.Trackedtransactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesTransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_transactions"),
"(Databases.Transactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesWriteTransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_write_transactions"),
"(Databases.WriteTransactions)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesXTPControllerDLCLatencyPerFetch = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_xtp_controller_dlc_fetch_latency_seconds"),
"(Databases.XTPControllerDLCLatencyPerFetch)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesXTPControllerDLCPeakLatency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_xtp_controller_dlc_peak_latency_seconds"),
"(Databases.XTPControllerDLCPeakLatency)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesXTPControllerLogProcessed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_xtp_controller_log_processed_bytes"),
"(Databases.XTPControllerLogProcessed)",
[]string{"mssql_instance", "database"},
nil,
)
c.databasesXTPMemoryUsedKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "databases_xtp_memory_used_bytes"),
"(Databases.XTPMemoryUsedKB)",
[]string{"mssql_instance", "database"},
nil,
)
return nil
}
func (c *Collector) collectDatabases(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorDatabases, c.databasesPerfDataCollectors, c.collectDatabasesInstance)
}
func (c *Collector) collectDatabasesInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Databases"), err)
}
for dbName, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.databasesActiveParallelRedoThreads,
prometheus.GaugeValue,
data[databasesActiveParallelRedoThreads].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesActiveTransactions,
prometheus.GaugeValue,
data[databasesActiveTransactions].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBackupPerRestoreThroughput,
prometheus.CounterValue,
data[databasesBackupPerRestoreThroughputPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyRows,
prometheus.CounterValue,
data[databasesBulkCopyRowsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesBulkCopyThroughput,
prometheus.CounterValue,
data[databasesBulkCopyThroughputPerSec].FirstValue*1024,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesCommitTableEntries,
prometheus.GaugeValue,
data[databasesCommitTableEntries].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDataFilesSizeKB,
prometheus.GaugeValue,
data[databasesDataFilesSizeKB].FirstValue*1024,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesDBCCLogicalScanBytes,
prometheus.CounterValue,
data[databasesDBCCLogicalScanBytesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesGroupCommitTime,
prometheus.CounterValue,
data[databasesGroupCommitTimePerSec].FirstValue/1000000.0,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogBytesFlushed,
prometheus.CounterValue,
data[databasesLogBytesFlushedPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheHits,
prometheus.GaugeValue,
data[databasesLogCacheHitRatio].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheLookups,
prometheus.GaugeValue,
data[databasesLogCacheHitRatioBase].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogCacheReads,
prometheus.CounterValue,
data[databasesLogCacheReadsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesSizeKB,
prometheus.GaugeValue,
data[databasesLogFilesSizeKB].FirstValue*1024,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFilesUsedSizeKB,
prometheus.GaugeValue,
data[databasesLogFilesUsedSizeKB].FirstValue*1024,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushes,
prometheus.CounterValue,
data[databasesLogFlushesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaits,
prometheus.CounterValue,
data[databasesLogFlushWaitsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWaitTime,
prometheus.GaugeValue,
data[databasesLogFlushWaitTime].FirstValue/1000.0,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogFlushWriteTimeMS,
prometheus.GaugeValue,
data[databasesLogFlushWriteTimeMS].FirstValue/1000.0,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogGrowths,
prometheus.GaugeValue,
data[databasesLogGrowths].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolCacheMisses,
prometheus.CounterValue,
data[databasesLogPoolCacheMissesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolDiskReads,
prometheus.CounterValue,
data[databasesLogPoolDiskReadsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashDeletes,
prometheus.CounterValue,
data[databasesLogPoolHashDeletesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolHashInserts,
prometheus.CounterValue,
data[databasesLogPoolHashInsertsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolInvalidHashEntry,
prometheus.CounterValue,
data[databasesLogPoolInvalidHashEntryPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogScanPushes,
prometheus.CounterValue,
data[databasesLogPoolLogScanPushesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolLogWriterPushes,
prometheus.CounterValue,
data[databasesLogPoolLogWriterPushesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushEmptyFreePool,
prometheus.CounterValue,
data[databasesLogPoolPushEmptyFreePoolPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushLowMemory,
prometheus.CounterValue,
data[databasesLogPoolPushLowMemoryPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolPushNoFreeBuffer,
prometheus.CounterValue,
data[databasesLogPoolPushNoFreeBufferPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolReqBehindTrunc,
prometheus.CounterValue,
data[databasesLogPoolReqBehindTruncPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequestsOldVLF,
prometheus.CounterValue,
data[databasesLogPoolRequestsOldVLFPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolRequests,
prometheus.CounterValue,
data[databasesLogPoolRequestsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalActiveLogSize,
prometheus.GaugeValue,
data[databasesLogPoolTotalActiveLogSize].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogPoolTotalSharedPoolSize,
prometheus.GaugeValue,
data[databasesLogPoolTotalSharedPoolSize].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogShrinks,
prometheus.GaugeValue,
data[databasesLogShrinks].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesLogTruncations,
prometheus.GaugeValue,
data[databasesLogTruncations].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesPercentLogUsed,
prometheus.GaugeValue,
data[databasesPercentLogUsed].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplPendingXacts,
prometheus.GaugeValue,
data[databasesReplPendingXacts].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesReplTransRate,
prometheus.CounterValue,
data[databasesReplTransRate].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesShrinkDataMovementBytes,
prometheus.CounterValue,
data[databasesShrinkDataMovementBytesPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTrackedTransactions,
prometheus.CounterValue,
data[databasesTrackedTransactionsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesTransactions,
prometheus.CounterValue,
data[databasesTransactionsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesWriteTransactions,
prometheus.CounterValue,
data[databasesWriteTransactionsPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCLatencyPerFetch,
prometheus.GaugeValue,
data[databasesXTPControllerDLCLatencyPerFetch].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerDLCPeakLatency,
prometheus.GaugeValue,
data[databasesXTPControllerDLCPeakLatency].FirstValue*1000000.0,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPControllerLogProcessed,
prometheus.CounterValue,
data[databasesXTPControllerLogProcessedPerSec].FirstValue,
sqlInstance, dbName,
)
ch <- prometheus.MustNewConstMetric(
c.databasesXTPMemoryUsedKB,
prometheus.GaugeValue,
data[databasesXTPMemoryUsedKB].FirstValue*1024,
sqlInstance, dbName,
)
}
return nil
}
func (c *Collector) closeDatabases() {
for _, collector := range c.databasesPerfDataCollectors {
collector.Close()
}
}
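The collectDatabases entry point above, like its siblings, delegates to a shared c.collect helper that is not part of this excerpt: it walks the per-SQL-instance perfdata collectors and runs the per-instance collection function for each. A plausible shape for that helper inside the mssql package, assuming failures are simply aggregated with errors.Join and that the file imports errors and fmt; the real implementation may also track per-sub-collector success metrics:

// collectPerInstance mirrors the assumed shape of the shared collect helper:
// run the per-instance collection function against every SQL instance and
// fold any failures into a single error.
func collectPerInstance(
	ch chan<- prometheus.Metric,
	name string,
	perfDataCollectors map[string]*perfdata.Collector,
	collectFn func(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error,
) error {
	errs := make([]error, 0, len(perfDataCollectors))

	for sqlInstance, perfDataCollector := range perfDataCollectors {
		if err := collectFn(ch, sqlInstance, perfDataCollector); err != nil {
			errs = append(errs, fmt.Errorf("%s: instance %q: %w", name, sqlInstance, err))
		}
	}

	return errors.Join(errs...)
}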

View File

@ -0,0 +1,443 @@
//go:build windows

package mssql

import (
	"fmt"

	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus/client_golang/prometheus"
)
type collectorDatabaseReplica struct {
dbReplicaPerfDataCollectors map[string]*perfdata.Collector
dbReplicaDatabaseFlowControlDelay *prometheus.Desc
dbReplicaDatabaseFlowControls *prometheus.Desc
dbReplicaFileBytesReceived *prometheus.Desc
dbReplicaGroupCommits *prometheus.Desc
dbReplicaGroupCommitTime *prometheus.Desc
dbReplicaLogApplyPendingQueue *prometheus.Desc
dbReplicaLogApplyReadyQueue *prometheus.Desc
dbReplicaLogBytesCompressed *prometheus.Desc
dbReplicaLogBytesDecompressed *prometheus.Desc
dbReplicaLogBytesReceived *prometheus.Desc
dbReplicaLogCompressionCachehits *prometheus.Desc
dbReplicaLogCompressionCachemisses *prometheus.Desc
dbReplicaLogCompressions *prometheus.Desc
dbReplicaLogDecompressions *prometheus.Desc
dbReplicaLogremainingforundo *prometheus.Desc
dbReplicaLogSendQueue *prometheus.Desc
dbReplicaMirroredWritetransactions *prometheus.Desc
dbReplicaRecoveryQueue *prometheus.Desc
dbReplicaRedoblocked *prometheus.Desc
dbReplicaRedoBytesRemaining *prometheus.Desc
dbReplicaRedoneBytes *prometheus.Desc
dbReplicaRedones *prometheus.Desc
dbReplicaTotalLogrequiringundo *prometheus.Desc
dbReplicaTransactionDelay *prometheus.Desc
}
const (
dbReplicaDatabaseFlowControlDelay = "Database Flow Control Delay"
dbReplicaDatabaseFlowControlsPerSec = "Database Flow Controls/sec"
dbReplicaFileBytesReceivedPerSec = "File Bytes Received/sec"
dbReplicaGroupCommitsPerSec = "Group Commits/Sec"
dbReplicaGroupCommitTime = "Group Commit Time"
dbReplicaLogApplyPendingQueue = "Log Apply Pending Queue"
dbReplicaLogApplyReadyQueue = "Log Apply Ready Queue"
dbReplicaLogBytesCompressedPerSec = "Log Bytes Compressed/sec"
dbReplicaLogBytesDecompressedPerSec = "Log Bytes Decompressed/sec"
dbReplicaLogBytesReceivedPerSec = "Log Bytes Received/sec"
dbReplicaLogCompressionCacheHitsPerSec = "Log Compression Cache hits/sec"
dbReplicaLogCompressionCacheMissesPerSec = "Log Compression Cache misses/sec"
dbReplicaLogCompressionsPerSec = "Log Compressions/sec"
dbReplicaLogDecompressionsPerSec = "Log Decompressions/sec"
dbReplicaLogRemainingForUndo = "Log remaining for undo"
dbReplicaLogSendQueue = "Log Send Queue"
dbReplicaMirroredWriteTransactionsPerSec = "Mirrored Write Transactions/sec"
dbReplicaRecoveryQueue = "Recovery Queue"
dbReplicaRedoBlockedPerSec = "Redo blocked/sec"
dbReplicaRedoBytesRemaining = "Redo Bytes Remaining"
dbReplicaRedoneBytesPerSec = "Redone Bytes/sec"
dbReplicaRedonesPerSec = "Redones/sec"
dbReplicaTotalLogRequiringUndo = "Total Log requiring undo"
dbReplicaTransactionDelay = "Transaction Delay"
)
func (c *Collector) buildDatabaseReplica() error {
var err error
c.dbReplicaPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
dbReplicaDatabaseFlowControlDelay,
dbReplicaDatabaseFlowControlsPerSec,
dbReplicaFileBytesReceivedPerSec,
dbReplicaGroupCommitsPerSec,
dbReplicaGroupCommitTime,
dbReplicaLogApplyPendingQueue,
dbReplicaLogApplyReadyQueue,
dbReplicaLogBytesCompressedPerSec,
dbReplicaLogBytesDecompressedPerSec,
dbReplicaLogBytesReceivedPerSec,
dbReplicaLogCompressionCacheHitsPerSec,
dbReplicaLogCompressionCacheMissesPerSec,
dbReplicaLogCompressionsPerSec,
dbReplicaLogDecompressionsPerSec,
dbReplicaLogRemainingForUndo,
dbReplicaLogSendQueue,
dbReplicaMirroredWriteTransactionsPerSec,
dbReplicaRecoveryQueue,
dbReplicaRedoBlockedPerSec,
dbReplicaRedoBytesRemaining,
dbReplicaRedoneBytesPerSec,
dbReplicaRedonesPerSec,
dbReplicaTotalLogRequiringUndo,
dbReplicaTransactionDelay,
}
for sqlInstance := range c.mssqlInstances {
c.dbReplicaPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance, err)
}
}
// Win32_PerfRawData_{instance}_SQLServerDatabaseReplica
c.dbReplicaDatabaseFlowControlDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_database_flow_control_wait_seconds"),
"(DatabaseReplica.DatabaseFlowControlDelay)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaDatabaseFlowControls = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_database_initiated_flow_controls"),
"(DatabaseReplica.DatabaseFlowControls)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaFileBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_received_file_bytes"),
"(DatabaseReplica.FileBytesReceived)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaGroupCommits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_group_commits"),
"(DatabaseReplica.GroupCommits)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaGroupCommitTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_group_commit_stall_seconds"),
"(DatabaseReplica.GroupCommitTime)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogApplyPendingQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_apply_pending_queue"),
"(DatabaseReplica.LogApplyPendingQueue)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogApplyReadyQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_apply_ready_queue"),
"(DatabaseReplica.LogApplyReadyQueue)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogBytesCompressed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_compressed_bytes"),
"(DatabaseReplica.LogBytesCompressed)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogBytesDecompressed = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_decompressed_bytes"),
"(DatabaseReplica.LogBytesDecompressed)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogBytesReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_received_bytes"),
"(DatabaseReplica.LogBytesReceived)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogCompressionCachehits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_compression_cachehits"),
"(DatabaseReplica.LogCompressionCachehits)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogCompressionCachemisses = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_compression_cachemisses"),
"(DatabaseReplica.LogCompressionCachemisses)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogCompressions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_compressions"),
"(DatabaseReplica.LogCompressions)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogDecompressions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_decompressions"),
"(DatabaseReplica.LogDecompressions)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogremainingforundo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_remaining_for_undo"),
"(DatabaseReplica.Logremainingforundo)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaLogSendQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_log_send_queue"),
"(DatabaseReplica.LogSendQueue)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaMirroredWritetransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_mirrored_write_transactions"),
"(DatabaseReplica.MirroredWriteTransactions)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaRecoveryQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_recovery_queue_records"),
"(DatabaseReplica.RecoveryQueue)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaRedoblocked = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_redo_blocks"),
"(DatabaseReplica.Redoblocked)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaRedoBytesRemaining = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_redo_remaining_bytes"),
"(DatabaseReplica.RedoBytesRemaining)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaRedoneBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_redone_bytes"),
"(DatabaseReplica.RedoneBytes)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaRedones = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_redones"),
"(DatabaseReplica.Redones)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaTotalLogrequiringundo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_total_log_requiring_undo"),
"(DatabaseReplica.TotalLogrequiringundo)",
[]string{"mssql_instance", "replica"},
nil,
)
c.dbReplicaTransactionDelay = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dbreplica_transaction_delay_seconds"),
"(DatabaseReplica.TransactionDelay)",
[]string{"mssql_instance", "replica"},
nil,
)
return nil
}
func (c *Collector) collectDatabaseReplica(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorDatabaseReplica, c.dbReplicaPerfDataCollectors, c.collectDatabaseReplicaInstance)
}
func (c *Collector) collectDatabaseReplicaInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), err)
}
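	// Each instance of the Database Replica performance object corresponds to a replicated database; it is exported via the "replica" label.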
for replicaName, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControlDelay,
prometheus.GaugeValue,
data[dbReplicaDatabaseFlowControlDelay].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaDatabaseFlowControls,
prometheus.CounterValue,
data[dbReplicaDatabaseFlowControlsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaFileBytesReceived,
prometheus.CounterValue,
data[dbReplicaFileBytesReceivedPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommits,
prometheus.CounterValue,
data[dbReplicaGroupCommitsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaGroupCommitTime,
prometheus.GaugeValue,
data[dbReplicaGroupCommitTime].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyPendingQueue,
prometheus.GaugeValue,
data[dbReplicaLogApplyPendingQueue].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogApplyReadyQueue,
prometheus.GaugeValue,
data[dbReplicaLogApplyReadyQueue].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesCompressed,
prometheus.CounterValue,
data[dbReplicaLogBytesCompressedPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesDecompressed,
prometheus.CounterValue,
data[dbReplicaLogBytesDecompressedPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogBytesReceived,
prometheus.CounterValue,
data[dbReplicaLogBytesReceivedPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachehits,
prometheus.CounterValue,
data[dbReplicaLogCompressionCacheHitsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressionCachemisses,
prometheus.CounterValue,
data[dbReplicaLogCompressionCacheMissesPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogCompressions,
prometheus.CounterValue,
data[dbReplicaLogCompressionsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogDecompressions,
prometheus.CounterValue,
data[dbReplicaLogDecompressionsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogremainingforundo,
prometheus.GaugeValue,
data[dbReplicaLogRemainingForUndo].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaLogSendQueue,
prometheus.GaugeValue,
data[dbReplicaLogSendQueue].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaMirroredWritetransactions,
prometheus.CounterValue,
data[dbReplicaMirroredWriteTransactionsPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRecoveryQueue,
prometheus.GaugeValue,
data[dbReplicaRecoveryQueue].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoblocked,
prometheus.CounterValue,
data[dbReplicaRedoBlockedPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoBytesRemaining,
prometheus.GaugeValue,
data[dbReplicaRedoBytesRemaining].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedoneBytes,
prometheus.CounterValue,
data[dbReplicaRedoneBytesPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaRedones,
prometheus.CounterValue,
data[dbReplicaRedonesPerSec].FirstValue,
sqlInstance, replicaName,
)
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTotalLogrequiringundo,
prometheus.GaugeValue,
data[dbReplicaTotalLogRequiringUndo].FirstValue,
sqlInstance, replicaName,
)
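		// Transaction Delay is reported in milliseconds; convert to seconds before exporting.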
ch <- prometheus.MustNewConstMetric(
c.dbReplicaTransactionDelay,
prometheus.GaugeValue,
data[dbReplicaTransactionDelay].FirstValue/1000.0,
sqlInstance, replicaName,
)
}
return nil
}
func (c *Collector) closeDatabaseReplica() {
for _, collector := range c.dbReplicaPerfDataCollectors {
collector.Close()
}
}

View File

@ -0,0 +1,446 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorGeneralStatistics struct {
genStatsPerfDataCollectors map[string]*perfdata.Collector
genStatsActiveTempTables *prometheus.Desc
genStatsConnectionReset *prometheus.Desc
genStatsEventNotificationsDelayedDrop *prometheus.Desc
genStatsHTTPAuthenticatedRequests *prometheus.Desc
genStatsLogicalConnections *prometheus.Desc
genStatsLogins *prometheus.Desc
genStatsLogouts *prometheus.Desc
genStatsMarsDeadlocks *prometheus.Desc
genStatsNonAtomicYieldRate *prometheus.Desc
genStatsProcessesBlocked *prometheus.Desc
genStatsSOAPEmptyRequests *prometheus.Desc
genStatsSOAPMethodInvocations *prometheus.Desc
genStatsSOAPSessionInitiateRequests *prometheus.Desc
genStatsSOAPSessionTerminateRequests *prometheus.Desc
genStatsSOAPSQLRequests *prometheus.Desc
genStatsSOAPWSDLRequests *prometheus.Desc
genStatsSQLTraceIOProviderLockWaits *prometheus.Desc
genStatsTempDBRecoveryUnitID *prometheus.Desc
genStatsTempDBrowSetID *prometheus.Desc
genStatsTempTablesCreationRate *prometheus.Desc
genStatsTempTablesForDestruction *prometheus.Desc
genStatsTraceEventNotificationQueue *prometheus.Desc
genStatsTransactions *prometheus.Desc
genStatsUserConnections *prometheus.Desc
}
const (
genStatsActiveTempTables = "Active Temp Tables"
genStatsConnectionResetPerSec = "Connection Reset/sec"
genStatsEventNotificationsDelayedDrop = "Event Notifications Delayed Drop"
genStatsHTTPAuthenticatedRequests = "HTTP Authenticated Requests"
genStatsLogicalConnections = "Logical Connections"
genStatsLoginsPerSec = "Logins/sec"
genStatsLogoutsPerSec = "Logouts/sec"
genStatsMarsDeadlocks = "Mars Deadlocks"
genStatsNonatomicYieldRate = "Non-atomic yield rate"
genStatsProcessesBlocked = "Processes blocked"
genStatsSOAPEmptyRequests = "SOAP Empty Requests"
genStatsSOAPMethodInvocations = "SOAP Method Invocations"
genStatsSOAPSessionInitiateRequests = "SOAP Session Initiate Requests"
genStatsSOAPSessionTerminateRequests = "SOAP Session Terminate Requests"
genStatsSOAPSQLRequests = "SOAP SQL Requests"
genStatsSOAPWSDLRequests = "SOAP WSDL Requests"
genStatsSQLTraceIOProviderLockWaits = "SQL Trace IO Provider Lock Waits"
genStatsTempdbRecoveryUnitID = "Tempdb recovery unit id"
genStatsTempdbRowsetID = "Tempdb rowset id"
genStatsTempTablesCreationRate = "Temp Tables Creation Rate"
genStatsTempTablesForDestruction = "Temp Tables For Destruction"
genStatsTraceEventNotificationQueue = "Trace Event Notification Queue"
genStatsTransactions = "Transactions"
genStatsUserConnections = "User Connections"
)
func (c *Collector) buildGeneralStatistics() error {
var err error
c.genStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
genStatsActiveTempTables,
genStatsConnectionResetPerSec,
genStatsEventNotificationsDelayedDrop,
genStatsHTTPAuthenticatedRequests,
genStatsLogicalConnections,
genStatsLoginsPerSec,
genStatsLogoutsPerSec,
genStatsMarsDeadlocks,
genStatsNonatomicYieldRate,
genStatsProcessesBlocked,
genStatsSOAPEmptyRequests,
genStatsSOAPMethodInvocations,
genStatsSOAPSessionInitiateRequests,
genStatsSOAPSessionTerminateRequests,
genStatsSOAPSQLRequests,
genStatsSOAPWSDLRequests,
genStatsSQLTraceIOProviderLockWaits,
genStatsTempdbRecoveryUnitID,
genStatsTempdbRowsetID,
genStatsTempTablesCreationRate,
genStatsTempTablesForDestruction,
genStatsTraceEventNotificationQueue,
genStatsTransactions,
genStatsUserConnections,
}
for sqlInstance := range c.mssqlInstances {
c.genStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance, err)
}
}
// Win32_PerfRawData_{instance}_SQLServerGeneralStatistics
c.genStatsActiveTempTables = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_active_temp_tables"),
"(GeneralStatistics.ActiveTempTables)",
[]string{"mssql_instance"},
nil,
)
c.genStatsConnectionReset = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_connection_resets"),
"(GeneralStatistics.ConnectionReset)",
[]string{"mssql_instance"},
nil,
)
c.genStatsEventNotificationsDelayedDrop = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_event_notifications_delayed_drop"),
"(GeneralStatistics.EventNotificationsDelayedDrop)",
[]string{"mssql_instance"},
nil,
)
c.genStatsHTTPAuthenticatedRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_http_authenticated_requests"),
"(GeneralStatistics.HTTPAuthenticatedRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsLogicalConnections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_logical_connections"),
"(GeneralStatistics.LogicalConnections)",
[]string{"mssql_instance"},
nil,
)
c.genStatsLogins = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_logins"),
"(GeneralStatistics.Logins)",
[]string{"mssql_instance"},
nil,
)
c.genStatsLogouts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_logouts"),
"(GeneralStatistics.Logouts)",
[]string{"mssql_instance"},
nil,
)
c.genStatsMarsDeadlocks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_mars_deadlocks"),
"(GeneralStatistics.MarsDeadlocks)",
[]string{"mssql_instance"},
nil,
)
c.genStatsNonAtomicYieldRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_non_atomic_yields"),
"(GeneralStatistics.Nonatomicyields)",
[]string{"mssql_instance"},
nil,
)
c.genStatsProcessesBlocked = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_blocked_processes"),
"(GeneralStatistics.Processesblocked)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPEmptyRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soap_empty_requests"),
"(GeneralStatistics.SOAPEmptyRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPMethodInvocations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soap_method_invocations"),
"(GeneralStatistics.SOAPMethodInvocations)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPSessionInitiateRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soap_session_initiate_requests"),
"(GeneralStatistics.SOAPSessionInitiateRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPSessionTerminateRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soap_session_terminate_requests"),
"(GeneralStatistics.SOAPSessionTerminateRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPSQLRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soapsql_requests"),
"(GeneralStatistics.SOAPSQLRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSOAPWSDLRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_soapwsdl_requests"),
"(GeneralStatistics.SOAPWSDLRequests)",
[]string{"mssql_instance"},
nil,
)
c.genStatsSQLTraceIOProviderLockWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_sql_trace_io_provider_lock_waits"),
"(GeneralStatistics.SQLTraceIOProviderLockWaits)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTempDBRecoveryUnitID = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_tempdb_recovery_unit_ids_generated"),
"(GeneralStatistics.Tempdbrecoveryunitid)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTempDBrowSetID = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_tempdb_rowset_ids_generated"),
"(GeneralStatistics.Tempdbrowsetid)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTempTablesCreationRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_temp_tables_creations"),
"(GeneralStatistics.TempTablesCreations)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTempTablesForDestruction = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_temp_tables_awaiting_destruction"),
"(GeneralStatistics.TempTablesForDestruction)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTraceEventNotificationQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_trace_event_notification_queue_size"),
"(GeneralStatistics.TraceEventNotificationQueue)",
[]string{"mssql_instance"},
nil,
)
c.genStatsTransactions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_transactions"),
"(GeneralStatistics.Transactions)",
[]string{"mssql_instance"},
nil,
)
c.genStatsUserConnections = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "genstats_user_connections"),
"(GeneralStatistics.UserConnections)",
[]string{"mssql_instance"},
nil,
)
return nil
}
func (c *Collector) collectGeneralStatistics(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorGeneralStatistics, c.genStatsPerfDataCollectors, c.collectGeneralStatisticsInstance)
}
func (c *Collector) collectGeneralStatisticsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), err)
}
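	// General Statistics is a single-instance performance object, so all values are read from the empty instance.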
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"))
}
ch <- prometheus.MustNewConstMetric(
c.genStatsActiveTempTables,
prometheus.GaugeValue,
data[genStatsActiveTempTables].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsConnectionReset,
prometheus.CounterValue,
data[genStatsConnectionResetPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsEventNotificationsDelayedDrop,
prometheus.GaugeValue,
data[genStatsEventNotificationsDelayedDrop].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsHTTPAuthenticatedRequests,
prometheus.GaugeValue,
data[genStatsHTTPAuthenticatedRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogicalConnections,
prometheus.GaugeValue,
data[genStatsLogicalConnections].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogins,
prometheus.CounterValue,
data[genStatsLoginsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsLogouts,
prometheus.CounterValue,
data[genStatsLogoutsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsMarsDeadlocks,
prometheus.GaugeValue,
data[genStatsMarsDeadlocks].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsNonAtomicYieldRate,
prometheus.CounterValue,
data[genStatsNonatomicYieldRate].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsProcessesBlocked,
prometheus.GaugeValue,
data[genStatsProcessesBlocked].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPEmptyRequests,
prometheus.GaugeValue,
data[genStatsSOAPEmptyRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPMethodInvocations,
prometheus.GaugeValue,
data[genStatsSOAPMethodInvocations].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionInitiateRequests,
prometheus.GaugeValue,
data[genStatsSOAPSessionInitiateRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSessionTerminateRequests,
prometheus.GaugeValue,
data[genStatsSOAPSessionTerminateRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPSQLRequests,
prometheus.GaugeValue,
data[genStatsSOAPSQLRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSOAPWSDLRequests,
prometheus.GaugeValue,
data[genStatsSOAPWSDLRequests].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsSQLTraceIOProviderLockWaits,
prometheus.GaugeValue,
data[genStatsSQLTraceIOProviderLockWaits].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBRecoveryUnitID,
prometheus.GaugeValue,
data[genStatsTempdbRecoveryUnitID].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempDBrowSetID,
prometheus.GaugeValue,
data[genStatsTempdbRowsetID].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesCreationRate,
prometheus.CounterValue,
data[genStatsTempTablesCreationRate].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTempTablesForDestruction,
prometheus.GaugeValue,
data[genStatsTempTablesForDestruction].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTraceEventNotificationQueue,
prometheus.GaugeValue,
data[genStatsTraceEventNotificationQueue].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsTransactions,
prometheus.GaugeValue,
data[genStatsTransactions].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.genStatsUserConnections,
prometheus.GaugeValue,
data[genStatsUserConnections].FirstValue,
sqlInstance,
)
return nil
}
func (c *Collector) closeGeneralStatistics() {
for _, perfDataCollector := range c.genStatsPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -0,0 +1,187 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorLocks struct {
locksPerfDataCollectors map[string]*perfdata.Collector
// Win32_PerfRawData_{instance}_SQLServerLocks
locksWaitTime *prometheus.Desc
locksCount *prometheus.Desc
locksLockRequests *prometheus.Desc
locksLockTimeouts *prometheus.Desc
locksLockTimeoutstimeout0 *prometheus.Desc
locksLockWaits *prometheus.Desc
locksLockWaitTimeMS *prometheus.Desc
locksNumberOfDeadlocks *prometheus.Desc
}
const (
locksAverageWaitTimeMS = "Average Wait Time (ms)"
locksAverageWaitTimeMSBase = "Average Wait Time Base"
locksLockRequestsPerSec = "Lock Requests/sec"
locksLockTimeoutsPerSec = "Lock Timeouts/sec"
locksLockTimeoutsTimeout0PerSec = "Lock Timeouts (timeout > 0)/sec"
locksLockWaitsPerSec = "Lock Waits/sec"
locksLockWaitTimeMS = "Lock Wait Time (ms)"
locksNumberOfDeadlocksPerSec = "Number of Deadlocks/sec"
)
func (c *Collector) buildLocks() error {
var err error
c.locksPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
locksAverageWaitTimeMS,
locksAverageWaitTimeMSBase,
locksLockRequestsPerSec,
locksLockTimeoutsPerSec,
locksLockTimeoutsTimeout0PerSec,
locksLockWaitsPerSec,
locksLockWaitTimeMS,
locksNumberOfDeadlocksPerSec,
}
for sqlInstance := range c.mssqlInstances {
c.locksPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Locks"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err)
}
}
c.locksWaitTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_wait_time_seconds"),
"(Locks.AverageWaitTimems Total time in seconds which locks have been holding resources)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksCount = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_count"),
"(Locks.AverageWaitTimems_Base count of how often requests have run into locks)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksLockRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_lock_requests"),
"(Locks.LockRequests)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksLockTimeouts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_lock_timeouts"),
"(Locks.LockTimeouts)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksLockTimeoutstimeout0 = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_lock_timeouts_excluding_NOWAIT"),
"(Locks.LockTimeoutstimeout0)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksLockWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_lock_waits"),
"(Locks.LockWaits)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksLockWaitTimeMS = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_lock_wait_seconds"),
"(Locks.LockWaitTimems)",
[]string{"mssql_instance", "resource"},
nil,
)
c.locksNumberOfDeadlocks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "locks_deadlocks"),
"(Locks.NumberOfDeadlocks)",
[]string{"mssql_instance", "resource"},
nil,
)
return nil
}
func (c *Collector) collectLocks(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorLocks, c.locksPerfDataCollectors, c.collectLocksInstance)
}
func (c *Collector) collectLocksInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Locks"), err)
}
for lockResourceName, data := range perfData {
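		// Wait-time counters are reported in milliseconds; divide by 1000 to export seconds.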
ch <- prometheus.MustNewConstMetric(
c.locksWaitTime,
prometheus.GaugeValue,
data[locksAverageWaitTimeMS].FirstValue/1000.0,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksCount,
prometheus.GaugeValue,
data[locksAverageWaitTimeMSBase].FirstValue/1000.0,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockRequests,
prometheus.CounterValue,
data[locksLockRequestsPerSec].FirstValue,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeouts,
prometheus.CounterValue,
data[locksLockTimeoutsPerSec].FirstValue,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockTimeoutstimeout0,
prometheus.CounterValue,
data[locksLockTimeoutsTimeout0PerSec].FirstValue,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaits,
prometheus.CounterValue,
data[locksLockWaitsPerSec].FirstValue,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksLockWaitTimeMS,
prometheus.GaugeValue,
data[locksLockWaitTimeMS].FirstValue/1000.0,
sqlInstance, lockResourceName,
)
ch <- prometheus.MustNewConstMetric(
c.locksNumberOfDeadlocks,
prometheus.CounterValue,
data[locksNumberOfDeadlocksPerSec].FirstValue,
sqlInstance, lockResourceName,
)
}
return nil
}
func (c *Collector) closeLocks() {
for _, perfDataCollector := range c.locksPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -0,0 +1,381 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorMemoryManager struct {
memMgrPerfDataCollectors map[string]*perfdata.Collector
memMgrConnectionMemoryKB *prometheus.Desc
memMgrDatabaseCacheMemoryKB *prometheus.Desc
memMgrExternalBenefitOfMemory *prometheus.Desc
memMgrFreeMemoryKB *prometheus.Desc
memMgrGrantedWorkspaceMemoryKB *prometheus.Desc
memMgrLockBlocks *prometheus.Desc
memMgrLockBlocksAllocated *prometheus.Desc
memMgrLockMemoryKB *prometheus.Desc
memMgrLockOwnerBlocks *prometheus.Desc
memMgrLockOwnerBlocksAllocated *prometheus.Desc
memMgrLogPoolMemoryKB *prometheus.Desc
memMgrMaximumWorkspaceMemoryKB *prometheus.Desc
memMgrMemoryGrantsOutstanding *prometheus.Desc
memMgrMemoryGrantsPending *prometheus.Desc
memMgrOptimizerMemoryKB *prometheus.Desc
memMgrReservedServerMemoryKB *prometheus.Desc
memMgrSQLCacheMemoryKB *prometheus.Desc
memMgrStolenServerMemoryKB *prometheus.Desc
memMgrTargetServerMemoryKB *prometheus.Desc
memMgrTotalServerMemoryKB *prometheus.Desc
}
const (
memMgrConnectionMemoryKB = "Connection Memory (KB)"
memMgrDatabaseCacheMemoryKB = "Database Cache Memory (KB)"
memMgrExternalBenefitOfMemory = "External benefit of memory"
memMgrFreeMemoryKB = "Free Memory (KB)"
memMgrGrantedWorkspaceMemoryKB = "Granted Workspace Memory (KB)"
memMgrLockBlocks = "Lock Blocks"
memMgrLockBlocksAllocated = "Lock Blocks Allocated"
memMgrLockMemoryKB = "Lock Memory (KB)"
memMgrLockOwnerBlocks = "Lock Owner Blocks"
memMgrLockOwnerBlocksAllocated = "Lock Owner Blocks Allocated"
memMgrLogPoolMemoryKB = "Log Pool Memory (KB)"
memMgrMaximumWorkspaceMemoryKB = "Maximum Workspace Memory (KB)"
memMgrMemoryGrantsOutstanding = "Memory Grants Outstanding"
memMgrMemoryGrantsPending = "Memory Grants Pending"
memMgrOptimizerMemoryKB = "Optimizer Memory (KB)"
memMgrReservedServerMemoryKB = "Reserved Server Memory (KB)"
memMgrSQLCacheMemoryKB = "SQL Cache Memory (KB)"
memMgrStolenServerMemoryKB = "Stolen Server Memory (KB)"
memMgrTargetServerMemoryKB = "Target Server Memory (KB)"
memMgrTotalServerMemoryKB = "Total Server Memory (KB)"
)
func (c *Collector) buildMemoryManager() error {
var err error
c.memMgrPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
memMgrConnectionMemoryKB,
memMgrDatabaseCacheMemoryKB,
memMgrExternalBenefitOfMemory,
memMgrFreeMemoryKB,
memMgrGrantedWorkspaceMemoryKB,
memMgrLockBlocks,
memMgrLockBlocksAllocated,
memMgrLockMemoryKB,
memMgrLockOwnerBlocks,
memMgrLockOwnerBlocksAllocated,
memMgrLogPoolMemoryKB,
memMgrMaximumWorkspaceMemoryKB,
memMgrMemoryGrantsOutstanding,
memMgrMemoryGrantsPending,
memMgrOptimizerMemoryKB,
memMgrReservedServerMemoryKB,
memMgrSQLCacheMemoryKB,
memMgrStolenServerMemoryKB,
memMgrTargetServerMemoryKB,
memMgrTotalServerMemoryKB,
}
for sqlInstance := range c.mssqlInstances {
c.memMgrPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance, err)
}
}
c.memMgrConnectionMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_connection_memory_bytes"),
"(MemoryManager.ConnectionMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrDatabaseCacheMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_database_cache_memory_bytes"),
"(MemoryManager.DatabaseCacheMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrExternalBenefitOfMemory = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_external_benefit_of_memory"),
"(MemoryManager.Externalbenefitofmemory)",
[]string{"mssql_instance"},
nil,
)
c.memMgrFreeMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_free_memory_bytes"),
"(MemoryManager.FreeMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrGrantedWorkspaceMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_granted_workspace_memory_bytes"),
"(MemoryManager.GrantedWorkspaceMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLockBlocks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_lock_blocks"),
"(MemoryManager.LockBlocks)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLockBlocksAllocated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_allocated_lock_blocks"),
"(MemoryManager.LockBlocksAllocated)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLockMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_lock_memory_bytes"),
"(MemoryManager.LockMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLockOwnerBlocks = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_lock_owner_blocks"),
"(MemoryManager.LockOwnerBlocks)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLockOwnerBlocksAllocated = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_allocated_lock_owner_blocks"),
"(MemoryManager.LockOwnerBlocksAllocated)",
[]string{"mssql_instance"},
nil,
)
c.memMgrLogPoolMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_log_pool_memory_bytes"),
"(MemoryManager.LogPoolMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrMaximumWorkspaceMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_maximum_workspace_memory_bytes"),
"(MemoryManager.MaximumWorkspaceMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrMemoryGrantsOutstanding = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_outstanding_memory_grants"),
"(MemoryManager.MemoryGrantsOutstanding)",
[]string{"mssql_instance"},
nil,
)
c.memMgrMemoryGrantsPending = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_pending_memory_grants"),
"(MemoryManager.MemoryGrantsPending)",
[]string{"mssql_instance"},
nil,
)
c.memMgrOptimizerMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_optimizer_memory_bytes"),
"(MemoryManager.OptimizerMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrReservedServerMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_reserved_server_memory_bytes"),
"(MemoryManager.ReservedServerMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrSQLCacheMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_sql_cache_memory_bytes"),
"(MemoryManager.SQLCacheMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrStolenServerMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_stolen_server_memory_bytes"),
"(MemoryManager.StolenServerMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrTargetServerMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_target_server_memory_bytes"),
"(MemoryManager.TargetServerMemoryKB)",
[]string{"mssql_instance"},
nil,
)
c.memMgrTotalServerMemoryKB = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "memmgr_total_server_memory_bytes"),
"(MemoryManager.TotalServerMemoryKB)",
[]string{"mssql_instance"},
nil,
)
return nil
}
func (c *Collector) collectMemoryManager(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorMemoryManager, c.memMgrPerfDataCollectors, c.collectMemoryManagerInstance)
}
func (c *Collector) collectMemoryManagerInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), err)
}
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"))
}
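	// Memory Manager counters are reported in KB; multiply by 1024 to export bytes.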
ch <- prometheus.MustNewConstMetric(
c.memMgrConnectionMemoryKB,
prometheus.GaugeValue,
data[memMgrConnectionMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrDatabaseCacheMemoryKB,
prometheus.GaugeValue,
data[memMgrDatabaseCacheMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrExternalBenefitOfMemory,
prometheus.GaugeValue,
data[memMgrExternalBenefitOfMemory].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrFreeMemoryKB,
prometheus.GaugeValue,
data[memMgrFreeMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrGrantedWorkspaceMemoryKB,
prometheus.GaugeValue,
data[memMgrGrantedWorkspaceMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocks,
prometheus.GaugeValue,
data[memMgrLockBlocks].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockBlocksAllocated,
prometheus.GaugeValue,
data[memMgrLockBlocksAllocated].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockMemoryKB,
prometheus.GaugeValue,
data[memMgrLockMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocks,
prometheus.GaugeValue,
data[memMgrLockOwnerBlocks].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLockOwnerBlocksAllocated,
prometheus.GaugeValue,
data[memMgrLockOwnerBlocksAllocated].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrLogPoolMemoryKB,
prometheus.GaugeValue,
data[memMgrLogPoolMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMaximumWorkspaceMemoryKB,
prometheus.GaugeValue,
data[memMgrMaximumWorkspaceMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsOutstanding,
prometheus.GaugeValue,
data[memMgrMemoryGrantsOutstanding].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrMemoryGrantsPending,
prometheus.GaugeValue,
data[memMgrMemoryGrantsPending].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrOptimizerMemoryKB,
prometheus.GaugeValue,
data[memMgrOptimizerMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrReservedServerMemoryKB,
prometheus.GaugeValue,
data[memMgrReservedServerMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrSQLCacheMemoryKB,
prometheus.GaugeValue,
data[memMgrSQLCacheMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrStolenServerMemoryKB,
prometheus.GaugeValue,
data[memMgrStolenServerMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTargetServerMemoryKB,
prometheus.GaugeValue,
data[memMgrTargetServerMemoryKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.memMgrTotalServerMemoryKB,
prometheus.GaugeValue,
data[memMgrTotalServerMemoryKB].FirstValue*1024,
sqlInstance,
)
return nil
}
func (c *Collector) closeMemoryManager() {
for _, perfDataCollector := range c.memMgrPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -0,0 +1,76 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorSQLErrors struct {
sqlErrorsPerfDataCollectors map[string]*perfdata.Collector
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
sqlErrorsTotal *prometheus.Desc
}
const (
sqlErrorsErrorsPerSec = "Errors/sec"
)
func (c *Collector) buildSQLErrors() error {
var err error
	c.sqlErrorsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
sqlErrorsErrorsPerSec,
}
for sqlInstance := range c.mssqlInstances {
		c.sqlErrorsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance, err)
}
}
// Win32_PerfRawData_{instance}_SQLServerSQLErrors
c.sqlErrorsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sql_errors_total"),
"(SQLErrors.Total)",
[]string{"mssql_instance", "resource"},
nil,
)
return nil
}
func (c *Collector) collectSQLErrors(ch chan<- prometheus.Metric) error {
	return c.collect(ch, subCollectorSQLErrors, c.sqlErrorsPerfDataCollectors, c.collectSQLErrorsInstance)
}
func (c *Collector) collectSQLErrorsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), err)
}
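	// The SQL Errors object exposes one instance per error category, exported via the "resource" label.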
for resource, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.sqlErrorsTotal,
prometheus.CounterValue,
data[sqlErrorsErrorsPerSec].FirstValue,
sqlInstance, resource,
)
}
return nil
}
func (c *Collector) closeSQLErrors() {
for _, perfDataCollector := range c.sqlErrorsPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -0,0 +1,237 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorSQLStats struct {
sqlStatsPerfDataCollectors map[string]*perfdata.Collector
sqlStatsAutoParamAttempts *prometheus.Desc
sqlStatsBatchRequests *prometheus.Desc
sqlStatsFailedAutoParams *prometheus.Desc
sqlStatsForcedParameterizations *prometheus.Desc
sqlStatsGuidedplanexecutions *prometheus.Desc
sqlStatsMisguidedplanexecutions *prometheus.Desc
sqlStatsSafeAutoParams *prometheus.Desc
sqlStatsSQLAttentionrate *prometheus.Desc
sqlStatsSQLCompilations *prometheus.Desc
sqlStatsSQLReCompilations *prometheus.Desc
sqlStatsUnsafeAutoParams *prometheus.Desc
}
const (
sqlStatsAutoParamAttemptsPerSec = "Auto-Param Attempts/sec"
sqlStatsBatchRequestsPerSec = "Batch Requests/sec"
sqlStatsFailedAutoParamsPerSec = "Failed Auto-Params/sec"
sqlStatsForcedParameterizationsPerSec = "Forced Parameterizations/sec"
sqlStatsGuidedplanexecutionsPerSec = "Guided plan executions/sec"
sqlStatsMisguidedplanexecutionsPerSec = "Misguided plan executions/sec"
sqlStatsSafeAutoParamsPerSec = "Safe Auto-Params/sec"
sqlStatsSQLAttentionrate = "SQL Attention rate"
sqlStatsSQLCompilationsPerSec = "SQL Compilations/sec"
sqlStatsSQLReCompilationsPerSec = "SQL Re-Compilations/sec"
sqlStatsUnsafeAutoParamsPerSec = "Unsafe Auto-Params/sec"
)
func (c *Collector) buildSQLStats() error {
var err error
	c.sqlStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
sqlStatsAutoParamAttemptsPerSec,
sqlStatsBatchRequestsPerSec,
sqlStatsFailedAutoParamsPerSec,
sqlStatsForcedParameterizationsPerSec,
sqlStatsGuidedplanexecutionsPerSec,
sqlStatsMisguidedplanexecutionsPerSec,
sqlStatsSafeAutoParamsPerSec,
sqlStatsSQLAttentionrate,
sqlStatsSQLCompilationsPerSec,
sqlStatsSQLReCompilationsPerSec,
sqlStatsUnsafeAutoParamsPerSec,
}
for sqlInstance := range c.mssqlInstances {
		c.sqlStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create SQL Statistics collector for instance %s: %w", sqlInstance, err)
}
}
c.sqlStatsAutoParamAttempts = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_auto_parameterization_attempts"),
"(SQLStatistics.AutoParamAttempts)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsBatchRequests = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_batch_requests"),
"(SQLStatistics.BatchRequests)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsFailedAutoParams = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_failed_auto_parameterization_attempts"),
"(SQLStatistics.FailedAutoParams)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsForcedParameterizations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_forced_parameterizations"),
"(SQLStatistics.ForcedParameterizations)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsGuidedplanexecutions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_guided_plan_executions"),
"(SQLStatistics.Guidedplanexecutions)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsMisguidedplanexecutions = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_misguided_plan_executions"),
"(SQLStatistics.Misguidedplanexecutions)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsSafeAutoParams = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_safe_auto_parameterization_attempts"),
"(SQLStatistics.SafeAutoParams)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsSQLAttentionrate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_sql_attentions"),
"(SQLStatistics.SQLAttentions)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsSQLCompilations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_sql_compilations"),
"(SQLStatistics.SQLCompilations)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsSQLReCompilations = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_sql_recompilations"),
"(SQLStatistics.SQLReCompilations)",
[]string{"mssql_instance"},
nil,
)
c.sqlStatsUnsafeAutoParams = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "sqlstats_unsafe_auto_parameterization_attempts"),
"(SQLStatistics.UnsafeAutoParams)",
[]string{"mssql_instance"},
nil,
)
return nil
}
func (c *Collector) collectSQLStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorSQLStats, c.sqlStatsPerfDataCollectors, c.collectSQLStatsInstance)
}
func (c *Collector) collectSQLStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"), err)
}
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "SQL Statistics"))
}
ch <- prometheus.MustNewConstMetric(
c.sqlStatsAutoParamAttempts,
prometheus.CounterValue,
data[sqlStatsAutoParamAttemptsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsBatchRequests,
prometheus.CounterValue,
data[sqlStatsBatchRequestsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsFailedAutoParams,
prometheus.CounterValue,
data[sqlStatsFailedAutoParamsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsForcedParameterizations,
prometheus.CounterValue,
data[sqlStatsForcedParameterizationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsGuidedplanexecutions,
prometheus.CounterValue,
data[sqlStatsGuidedplanexecutionsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsMisguidedplanexecutions,
prometheus.CounterValue,
data[sqlStatsMisguidedplanexecutionsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSafeAutoParams,
prometheus.CounterValue,
data[sqlStatsSafeAutoParamsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLAttentionrate,
prometheus.CounterValue,
data[sqlStatsSQLAttentionrate].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLCompilations,
prometheus.CounterValue,
data[sqlStatsSQLCompilationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsSQLReCompilations,
prometheus.CounterValue,
data[sqlStatsSQLReCompilationsPerSec].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.sqlStatsUnsafeAutoParams,
prometheus.CounterValue,
data[sqlStatsUnsafeAutoParamsPerSec].FirstValue,
sqlInstance,
)
return nil
}
func (c *Collector) closeSQLStats() {
for _, perfDataCollector := range c.sqlStatsPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -1,10 +1,12 @@
//go:build windows
package mssql_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/mssql"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -0,0 +1,271 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorTransactions struct {
transactionsPerfDataCollectors map[string]*perfdata.Collector
transactionsTempDbFreeSpaceBytes *prometheus.Desc
transactionsLongestTransactionRunningSeconds *prometheus.Desc
transactionsNonSnapshotVersionActiveTotal *prometheus.Desc
transactionsSnapshotActiveTotal *prometheus.Desc
transactionsActive *prometheus.Desc
transactionsUpdateConflictsTotal *prometheus.Desc
transactionsUpdateSnapshotActiveTotal *prometheus.Desc
transactionsVersionCleanupRateBytes *prometheus.Desc
transactionsVersionGenerationRateBytes *prometheus.Desc
transactionsVersionStoreSizeBytes *prometheus.Desc
transactionsVersionStoreUnits *prometheus.Desc
transactionsVersionStoreCreationUnits *prometheus.Desc
transactionsVersionStoreTruncationUnits *prometheus.Desc
}
const (
transactionsFreeSpaceintempdbKB = "Free Space in tempdb (KB)"
transactionsLongestTransactionRunningTime = "Longest Transaction Running Time"
transactionsNonSnapshotVersionTransactions = "NonSnapshot Version Transactions"
transactionsSnapshotTransactions = "Snapshot Transactions"
transactionsTransactions = "Transactions"
transactionsUpdateconflictratio = "Update conflict ratio"
transactionsUpdateSnapshotTransactions = "Update Snapshot Transactions"
transactionsVersionCleanuprateKBPers = "Version Cleanup rate (KB/s)"
transactionsVersionGenerationrateKBPers = "Version Generation rate (KB/s)"
transactionsVersionStoreSizeKB = "Version Store Size (KB)"
transactionsVersionStoreunitcount = "Version Store unit count"
transactionsVersionStoreunitcreation = "Version Store unit creation"
transactionsVersionStoreunittruncation = "Version Store unit truncation"
)
func (c *Collector) buildTransactions() error {
var err error
c.transactionsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
transactionsFreeSpaceintempdbKB,
transactionsLongestTransactionRunningTime,
transactionsNonSnapshotVersionTransactions,
transactionsSnapshotTransactions,
transactionsTransactions,
transactionsUpdateconflictratio,
transactionsUpdateSnapshotTransactions,
transactionsVersionCleanuprateKBPers,
transactionsVersionGenerationrateKBPers,
transactionsVersionStoreSizeKB,
transactionsVersionStoreunitcount,
transactionsVersionStoreunitcreation,
transactionsVersionStoreunittruncation,
}
for sqlInstance := range c.mssqlInstances {
c.transactionsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), nil, counters)
if err != nil {
return fmt.Errorf("failed to create Transactions collector for instance %s: %w", sqlInstance, err)
}
}
c.transactionsTempDbFreeSpaceBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_tempdb_free_space_bytes"),
"(Transactions.FreeSpaceInTempDbKB)",
[]string{"mssql_instance"},
nil,
)
c.transactionsLongestTransactionRunningSeconds = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_longest_transaction_running_seconds"),
"(Transactions.LongestTransactionRunningTime)",
[]string{"mssql_instance"},
nil,
)
c.transactionsNonSnapshotVersionActiveTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_nonsnapshot_version_active_total"),
"(Transactions.NonSnapshotVersionTransactions)",
[]string{"mssql_instance"},
nil,
)
c.transactionsSnapshotActiveTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_snapshot_active_total"),
"(Transactions.SnapshotTransactions)",
[]string{"mssql_instance"},
nil,
)
c.transactionsActive = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_active"),
"(Transactions.Transactions)",
[]string{"mssql_instance"},
nil,
)
c.transactionsUpdateConflictsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_update_conflicts_total"),
"(Transactions.UpdateConflictRatio)",
[]string{"mssql_instance"},
nil,
)
c.transactionsUpdateSnapshotActiveTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_update_snapshot_active_total"),
"(Transactions.UpdateSnapshotTransactions)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionCleanupRateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_cleanup_rate_bytes"),
"(Transactions.VersionCleanupRateKBs)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionGenerationRateBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_generation_rate_bytes"),
"(Transactions.VersionGenerationRateKBs)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionStoreSizeBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_store_size_bytes"),
"(Transactions.VersionStoreSizeKB)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionStoreUnits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_store_units"),
"(Transactions.VersionStoreUnitCount)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionStoreCreationUnits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_store_creation_units"),
"(Transactions.VersionStoreUnitCreation)",
[]string{"mssql_instance"},
nil,
)
c.transactionsVersionStoreTruncationUnits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "transactions_version_store_truncation_units"),
"(Transactions.VersionStoreUnitTruncation)",
[]string{"mssql_instance"},
nil,
)
return nil
}
func (c *Collector) collectTransactions(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorTransactions, c.transactionsPerfDataCollectors, c.collectTransactionsInstance)
}
// Win32_PerfRawData_MSSQLSERVER_Transactions docs:
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *Collector) collectTransactionsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"), err)
}
data, ok := perfData[perfdata.EmptyInstance]
if !ok {
return fmt.Errorf("perflib query for %s returned empty result set", c.mssqlGetPerfObjectName(sqlInstance, "Transactions"))
}
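	// Counters reported in KB (tempdb free space, version store rates and size) are converted to bytes.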
ch <- prometheus.MustNewConstMetric(
c.transactionsTempDbFreeSpaceBytes,
prometheus.GaugeValue,
data[transactionsFreeSpaceintempdbKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsLongestTransactionRunningSeconds,
prometheus.GaugeValue,
data[transactionsLongestTransactionRunningTime].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsNonSnapshotVersionActiveTotal,
prometheus.CounterValue,
data[transactionsNonSnapshotVersionTransactions].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsSnapshotActiveTotal,
prometheus.CounterValue,
data[transactionsSnapshotTransactions].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsActive,
prometheus.GaugeValue,
data[transactionsTransactions].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateConflictsTotal,
prometheus.CounterValue,
data[transactionsUpdateconflictratio].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsUpdateSnapshotActiveTotal,
prometheus.CounterValue,
data[transactionsUpdateSnapshotTransactions].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionCleanupRateBytes,
prometheus.GaugeValue,
data[transactionsVersionCleanuprateKBPers].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionGenerationRateBytes,
prometheus.GaugeValue,
data[transactionsVersionGenerationrateKBPers].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreSizeBytes,
prometheus.GaugeValue,
data[transactionsVersionStoreSizeKB].FirstValue*1024,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreUnits,
prometheus.CounterValue,
data[transactionsVersionStoreunitcount].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreCreationUnits,
prometheus.CounterValue,
data[transactionsVersionStoreunitcreation].FirstValue,
sqlInstance,
)
ch <- prometheus.MustNewConstMetric(
c.transactionsVersionStoreTruncationUnits,
prometheus.CounterValue,
data[transactionsVersionStoreunittruncation].FirstValue,
sqlInstance,
)
return nil
}
func (c *Collector) closeTransactions() {
for _, perfDataCollector := range c.transactionsPerfDataCollectors {
perfDataCollector.Close()
}
}

View File

@ -0,0 +1,251 @@
//go:build windows
package mssql
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorWaitStats struct {
waitStatsPerfDataCollectors map[string]*perfdata.Collector
waitStatsLockWaits *prometheus.Desc
waitStatsMemoryGrantQueueWaits *prometheus.Desc
waitStatsThreadSafeMemoryObjectsWaits *prometheus.Desc
waitStatsLogWriteWaits *prometheus.Desc
waitStatsLogBufferWaits *prometheus.Desc
waitStatsNetworkIOWaits *prometheus.Desc
waitStatsPageIOLatchWaits *prometheus.Desc
waitStatsPageLatchWaits *prometheus.Desc
waitStatsNonPageLatchWaits *prometheus.Desc
waitStatsWaitForTheWorkerWaits *prometheus.Desc
waitStatsWorkspaceSynchronizationWaits *prometheus.Desc
waitStatsTransactionOwnershipWaits *prometheus.Desc
}
const (
waitStatsLockWaits = "Lock waits"
waitStatsMemoryGrantQueueWaits = "Memory grant queue waits"
waitStatsThreadSafeMemoryObjectsWaits = "Thread-safe memory objects waits"
waitStatsLogWriteWaits = "Log write waits"
waitStatsLogBufferWaits = "Log buffer waits"
waitStatsNetworkIOWaits = "Network IO waits"
waitStatsPageIOLatchWaits = "Page IO latch waits"
waitStatsPageLatchWaits = "Page latch waits"
waitStatsNonpageLatchWaits = "Non-Page latch waits"
waitStatsWaitForTheWorkerWaits = "Wait for the worker"
waitStatsWorkspaceSynchronizationWaits = "Workspace synchronization waits"
waitStatsTransactionOwnershipWaits = "Transaction ownership waits"
)
func (c *Collector) buildWaitStats() error {
var err error
c.waitStatsPerfDataCollectors = make(map[string]*perfdata.Collector, len(c.mssqlInstances))
counters := []string{
waitStatsLockWaits,
waitStatsMemoryGrantQueueWaits,
waitStatsThreadSafeMemoryObjectsWaits,
waitStatsLogWriteWaits,
waitStatsLogBufferWaits,
waitStatsNetworkIOWaits,
waitStatsPageIOLatchWaits,
waitStatsPageLatchWaits,
waitStatsNonpageLatchWaits,
waitStatsWaitForTheWorkerWaits,
waitStatsWorkspaceSynchronizationWaits,
waitStatsTransactionOwnershipWaits,
}
for sqlInstance := range c.mssqlInstances {
c.waitStatsPerfDataCollectors[sqlInstance], err = perfdata.NewCollector(c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), perfdata.InstanceAll, counters)
if err != nil {
return fmt.Errorf("failed to create Wait Statistics collector for instance %s: %w", sqlInstance, err)
}
}
c.waitStatsLockWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_lock_waits"),
"(WaitStats.LockWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsMemoryGrantQueueWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_memory_grant_queue_waits"),
"(WaitStats.MemoryGrantQueueWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsThreadSafeMemoryObjectsWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_thread_safe_memory_objects_waits"),
"(WaitStats.ThreadSafeMemoryObjectsWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsLogWriteWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_log_write_waits"),
"(WaitStats.LogWriteWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsLogBufferWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_log_buffer_waits"),
"(WaitStats.LogBufferWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsNetworkIOWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_network_io_waits"),
"(WaitStats.NetworkIOWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsPageIOLatchWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_page_io_latch_waits"),
"(WaitStats.PageIOLatchWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsPageLatchWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_page_latch_waits"),
"(WaitStats.PageLatchWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsNonPageLatchWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_nonpage_latch_waits"),
"(WaitStats.NonpageLatchWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsWaitForTheWorkerWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_wait_for_the_worker_waits"),
"(WaitStats.WaitForTheWorkerWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsWorkspaceSynchronizationWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_workspace_synchronization_waits"),
"(WaitStats.WorkspaceSynchronizationWaits)",
[]string{"mssql_instance", "item"},
nil,
)
c.waitStatsTransactionOwnershipWaits = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "waitstats_transaction_ownership_waits"),
"(WaitStats.TransactionOwnershipWaits)",
[]string{"mssql_instance", "item"},
nil,
)
return nil
}
func (c *Collector) collectWaitStats(ch chan<- prometheus.Metric) error {
return c.collect(ch, subCollectorWaitStats, c.waitStatsPerfDataCollectors, c.collectWaitStatsInstance)
}
func (c *Collector) collectWaitStatsInstance(ch chan<- prometheus.Metric, sqlInstance string, perfDataCollector *perfdata.Collector) error {
perfData, err := perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect %s metrics: %w", c.mssqlGetPerfObjectName(sqlInstance, "Wait Statistics"), err)
}
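// item is the counter-instance name of the Wait Statistics object; typical values are
// assumed to be breakdowns such as "Average wait time (ms)" or "Waits in progress".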
for item, data := range perfData {
ch <- prometheus.MustNewConstMetric(
c.waitStatsLockWaits,
prometheus.CounterValue,
data[waitStatsLockWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsMemoryGrantQueueWaits,
prometheus.CounterValue,
data[waitStatsMemoryGrantQueueWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsThreadSafeMemoryObjectsWaits,
prometheus.CounterValue,
data[waitStatsThreadSafeMemoryObjectsWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogWriteWaits,
prometheus.CounterValue,
data[waitStatsLogWriteWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsLogBufferWaits,
prometheus.CounterValue,
data[waitStatsLogBufferWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNetworkIOWaits,
prometheus.CounterValue,
data[waitStatsNetworkIOWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageIOLatchWaits,
prometheus.CounterValue,
data[waitStatsPageIOLatchWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsPageLatchWaits,
prometheus.CounterValue,
data[waitStatsPageLatchWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsNonPageLatchWaits,
prometheus.CounterValue,
data[waitStatsNonpageLatchWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWaitForTheWorkerWaits,
prometheus.CounterValue,
data[waitStatsWaitForTheWorkerWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsWorkspaceSynchronizationWaits,
prometheus.CounterValue,
data[waitStatsWorkspaceSynchronizationWaits].FirstValue,
sqlInstance, item,
)
ch <- prometheus.MustNewConstMetric(
c.waitStatsTransactionOwnershipWaits,
prometheus.CounterValue,
data[waitStatsTransactionOwnershipWaits].FirstValue,
sqlInstance, item,
)
}
return nil
}
func (c *Collector) closeWaitStats() {
for _, perfDataCollector := range c.waitStatsPerfDataCollectors {
perfDataCollector.Close()
}
}
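// waitStatsLifecycleSketch is illustrative only (not part of this package): it shows the
// assumed build/collect/close lifecycle that ties the three functions above together.
// Error handling and channel plumbing are simplified.
func waitStatsLifecycleSketch(c *Collector, ch chan<- prometheus.Metric) error {
	if err := c.buildWaitStats(); err != nil { // one PDH collector per SQL instance
		return err
	}
	defer c.closeWaitStats() // close the per-instance collectors when done

	// Emits one sample per (mssql_instance, item) pair for each wait-stats metric family.
	return c.collectWaitStats(ch)
}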

View File

@ -1,36 +1,19 @@
//go:build windows
package net
const (
BytesReceivedPerSec = "Bytes Received/sec"
BytesSentPerSec = "Bytes Sent/sec"
BytesTotalPerSec = "Bytes Total/sec"
OutputQueueLength = "Output Queue Length"
PacketsOutboundDiscarded = "Packets Outbound Discarded"
PacketsOutboundErrors = "Packets Outbound Errors"
PacketsPerSec = "Packets/sec"
PacketsReceivedDiscarded = "Packets Received Discarded"
PacketsReceivedErrors = "Packets Received Errors"
PacketsReceivedPerSec = "Packets Received/sec"
PacketsReceivedUnknown = "Packets Received Unknown"
PacketsSentPerSec = "Packets Sent/sec"
CurrentBandwidth = "Current Bandwidth"
bytesReceivedPerSec = "Bytes Received/sec"
bytesSentPerSec = "Bytes Sent/sec"
bytesTotalPerSec = "Bytes Total/sec"
currentBandwidth = "Current Bandwidth"
outputQueueLength = "Output Queue Length"
packetsOutboundDiscarded = "Packets Outbound Discarded"
packetsOutboundErrors = "Packets Outbound Errors"
packetsPerSec = "Packets/sec"
packetsReceivedDiscarded = "Packets Received Discarded"
packetsReceivedErrors = "Packets Received Errors"
packetsReceivedPerSec = "Packets Received/sec"
packetsReceivedUnknown = "Packets Received Unknown"
packetsSentPerSec = "Packets Sent/sec"
)
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
type perflibNetworkInterface struct {
BytesReceivedPerSec float64 `perflib:"Bytes Received/sec"`
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
Name string
OutputQueueLength float64 `perflib:"Output Queue Length"`
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
PacketsPerSec float64 `perflib:"Packets/sec"`
PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
PacketsReceivedErrors float64 `perflib:"Packets Received Errors"`
PacketsReceivedPerSec float64 `perflib:"Packets Received/sec"`
PacketsReceivedUnknown float64 `perflib:"Packets Received Unknown"`
PacketsSentPerSec float64 `perflib:"Packets Sent/sec"`
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
}

View File

@ -15,8 +15,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/perfdata"
v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
"github.com/prometheus-community/windows_exporter/internal/toggle"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sys/windows"
@ -43,7 +41,7 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
perfDataCollector perfdata.Collector
perfDataCollector *perfdata.Collector
bytesReceivedTotal *prometheus.Desc
bytesSentTotal *prometheus.Desc
@ -137,44 +135,32 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
if toggle.IsPDHEnabled() {
return []string{}, nil
}
return []string{"Network Interface"}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
if toggle.IsPDHEnabled() {
c.perfDataCollector.Close()
}
func (c *Collector) Close() error {
c.perfDataCollector.Close()
return nil
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
if toggle.IsPDHEnabled() {
var err error
var err error
c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Network Interface", perfdata.AllInstances, []string{
BytesReceivedPerSec,
BytesSentPerSec,
BytesTotalPerSec,
OutputQueueLength,
PacketsOutboundDiscarded,
PacketsOutboundErrors,
PacketsPerSec,
PacketsReceivedDiscarded,
PacketsReceivedErrors,
PacketsReceivedPerSec,
PacketsReceivedUnknown,
PacketsSentPerSec,
CurrentBandwidth,
})
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
c.perfDataCollector, err = perfdata.NewCollector("Network Interface", perfdata.InstanceAll, []string{
bytesReceivedPerSec,
bytesSentPerSec,
bytesTotalPerSec,
outputQueueLength,
packetsOutboundDiscarded,
packetsOutboundErrors,
packetsPerSec,
packetsReceivedDiscarded,
packetsReceivedErrors,
packetsReceivedPerSec,
packetsReceivedUnknown,
packetsSentPerSec,
currentBandwidth,
})
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, "addresses") {
@ -279,130 +265,25 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
if slices.Contains(c.config.CollectorsEnabled, "metrics") {
var err error
if toggle.IsPDHEnabled() {
err = c.collectPDH(ch)
} else {
err = c.collect(ctx, logger, ch)
}
if err != nil {
return fmt.Errorf("failed collecting net metrics: %w", err)
if err := c.collect(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting metrics: %w", err))
}
}
if slices.Contains(c.config.CollectorsEnabled, "nic_addresses") {
if err := c.collectNICAddresses(ch); err != nil {
return fmt.Errorf("failed collecting net addresses: %w", err)
errs = append(errs, fmt.Errorf("failed collecting net addresses: %w", err))
}
}
return nil
return errors.Join(errs...)
}
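// collectAllSketch is illustrative only (not part of this package). It shows the same
// errors.Join pattern used in Collect above: run every step, keep going on failure, and
// report all failures as one joined error (nil when nothing failed). errors.Join is
// provided by the standard library since Go 1.20.
func collectAllSketch(steps ...func() error) error {
	errs := make([]error, 0, len(steps))
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}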
func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
var dst []perflibNetworkInterface
if err := v1.UnmarshalObject(ctx.PerfObjects["Network Interface"], &dst, logger); err != nil {
return err
}
for _, nic := range dst {
if c.config.NicExclude.MatchString(nic.Name) ||
!c.config.NicInclude.MatchString(nic.Name) {
continue
}
// Counters
ch <- prometheus.MustNewConstMetric(
c.bytesReceivedTotal,
prometheus.CounterValue,
nic.BytesReceivedPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesSentTotal,
prometheus.CounterValue,
nic.BytesSentPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.bytesTotal,
prometheus.CounterValue,
nic.BytesTotalPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.outputQueueLength,
prometheus.GaugeValue,
nic.OutputQueueLength,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundDiscarded,
prometheus.CounterValue,
nic.PacketsOutboundDiscarded,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundErrors,
prometheus.CounterValue,
nic.PacketsOutboundErrors,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsTotal,
prometheus.CounterValue,
nic.PacketsPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedDiscarded,
prometheus.CounterValue,
nic.PacketsReceivedDiscarded,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedErrors,
prometheus.CounterValue,
nic.PacketsReceivedErrors,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
nic.PacketsReceivedPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedUnknown,
prometheus.CounterValue,
nic.PacketsReceivedUnknown,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.packetsSentTotal,
prometheus.CounterValue,
nic.PacketsSentPerSec,
nic.Name,
)
ch <- prometheus.MustNewConstMetric(
c.currentBandwidth,
prometheus.GaugeValue,
nic.CurrentBandwidth/8,
nic.Name,
)
}
return nil
}
func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
func (c *Collector) collect(ch chan<- prometheus.Metric) error {
data, err := c.perfDataCollector.Collect()
if err != nil {
return fmt.Errorf("failed to collect Network Information metrics: %w", err)
@ -418,79 +299,79 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
ch <- prometheus.MustNewConstMetric(
c.bytesReceivedTotal,
prometheus.CounterValue,
nicData[BytesReceivedPerSec].FirstValue,
nicData[bytesReceivedPerSec].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.bytesSentTotal,
prometheus.CounterValue,
nicData[BytesSentPerSec].FirstValue,
nicData[bytesSentPerSec].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.bytesTotal,
prometheus.CounterValue,
nicData[BytesTotalPerSec].FirstValue,
nicData[bytesTotalPerSec].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.outputQueueLength,
prometheus.GaugeValue,
nicData[OutputQueueLength].FirstValue,
nicData[outputQueueLength].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundDiscarded,
prometheus.CounterValue,
nicData[PacketsOutboundDiscarded].FirstValue,
nicData[packetsOutboundDiscarded].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsOutboundErrors,
prometheus.CounterValue,
nicData[PacketsOutboundErrors].FirstValue,
nicData[packetsOutboundErrors].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsTotal,
prometheus.CounterValue,
nicData[PacketsPerSec].FirstValue,
nicData[packetsPerSec].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedDiscarded,
prometheus.CounterValue,
nicData[PacketsReceivedDiscarded].FirstValue,
nicData[packetsReceivedDiscarded].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedErrors,
prometheus.CounterValue,
nicData[PacketsReceivedErrors].FirstValue,
nicData[packetsReceivedErrors].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedTotal,
prometheus.CounterValue,
nicData[PacketsReceivedPerSec].FirstValue,
nicData[packetsReceivedPerSec].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceivedUnknown,
prometheus.CounterValue,
nicData[PacketsReceivedUnknown].FirstValue,
nicData[packetsReceivedUnknown].FirstValue,
nicName,
)
ch <- prometheus.MustNewConstMetric(
c.packetsSentTotal,
prometheus.CounterValue,
nicData[PacketsSentPerSec].FirstValue,
nicData[packetsSentPerSec].FirstValue,
nicName,
)
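// Note: the "Current Bandwidth" counter is reported in bits per second; the division by 8
// below converts it to bytes per second for the exported gauge (for example, a 1 Gbit/s
// link is exposed as 125000000).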
ch <- prometheus.MustNewConstMetric(
c.currentBandwidth,
prometheus.GaugeValue,
nicData[CurrentBandwidth].FirstValue/8,
nicData[currentBandwidth].FirstValue/8,
nicName,
)
}

View File

@ -7,7 +7,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/collector/net"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -6,7 +6,7 @@ import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/net"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func TestCollector(t *testing.T) {

View File

@ -10,7 +10,6 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
@ -135,11 +134,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -187,7 +182,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
var (
err error
errs []error

View File

@ -1,10 +1,12 @@
//go:build windows
package netframework_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/netframework"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

View File

@ -1,3 +1,5 @@
//go:build windows
package nps
import (
@ -17,7 +19,6 @@ type Config struct{}
var ConfigDefaults = Config{}
// Collector is a Prometheus Collector for WMI Win32_PerfRawData_IAS_NPSAuthenticationServer and Win32_PerfRawData_IAS_NPSAccountingServer metrics.
type Collector struct {
config Config
miSession *mi.Session
@ -73,11 +74,7 @@ func (c *Collector) GetName() string {
return Name
}
func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
return []string{}, nil
}
func (c *Collector) Close(_ *slog.Logger) error {
func (c *Collector) Close() error {
return nil
}
@ -258,21 +255,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
logger = logger.With(slog.String("collector", Name))
if err := c.CollectAccept(ch); err != nil {
logger.Error(fmt.Sprintf("failed collecting NPS accept data: %s", err))
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0, 2)
return err
if err := c.CollectAccept(ch); err != nil {
errs = append(errs, fmt.Errorf("failed collecting NPS accept data: %w", err))
}
if err := c.CollectAccounting(ch); err != nil {
logger.Error(fmt.Sprintf("failed collecting NPS accounting data: %s", err))
return err
errs = append(errs, fmt.Errorf("failed collecting NPS accounting data: %w", err))
}
return nil
return errors.Join(errs...)
}
// Win32_PerfRawData_IAS_NPSAuthenticationServer docs:

View File

@ -1,10 +1,12 @@
//go:build windows
package nps_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/nps"
"github.com/prometheus-community/windows_exporter/internal/testutils"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {

Some files were not shown because too many files have changed in this diff.