Merge pull request #2124 from Betula-L/fix-timestamp-gap

add prometheus metrics timestamp

Commit: 55d76d7998
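The change below threads each sample's collection time (s.Timestamp from the container stats) through to the exporter, so every exposed metric carries an explicit timestamp instead of the scrape time. Not part of the diff: a minimal sketch of the client_golang mechanism used here, prometheus.NewMetricWithTimestamp (available in the v0.9.x client vendored by this PR). The collector name, metric name, and port are illustrative, not taken from cAdvisor.

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// exampleCollector is a hypothetical collector exporting one gauge whose
// sample carries an explicit timestamp rather than the scrape time.
type exampleCollector struct {
	desc *prometheus.Desc
}

func (c *exampleCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) {
	observedAt := time.Now().Add(-30 * time.Second) // pretend the value was sampled 30s ago
	m := prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	// NewMetricWithTimestamp wraps the metric so the exposition line ends with
	// the sample's millisecond timestamp, which is what this PR does per metricValue.
	ch <- prometheus.NewMetricWithTimestamp(observedAt, m)
}

func main() {
	r := prometheus.NewRegistry()
	r.MustRegister(&exampleCollector{
		desc: prometheus.NewDesc("example_value", "An example timestamped gauge.", nil, nil),
	})
	http.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}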
Godeps/Godeps.json (generated; 13 changed lines)

@@ -726,13 +726,13 @@
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/prometheus",
-			"Comment": "v0.8.0-62-g08fd2e1",
-			"Rev": "08fd2e12372a66e68e30523c7642e0cbc3e4fbde"
+			"Comment": "v0.9.1",
+			"Rev": "abad2d1bd44235a26707c172eab6bca5bf2dbad3"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp",
-			"Comment": "v0.8.0-62-g08fd2e1",
-			"Rev": "08fd2e12372a66e68e30523c7642e0cbc3e4fbde"
+			"Comment": "v0.9.1",
+			"Rev": "abad2d1bd44235a26707c172eab6bca5bf2dbad3"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_model/go",
@@ -1009,6 +1009,11 @@
 		{
 			"ImportPath": "k8s.io/utils/clock/testing",
 			"Rev": "aedf551cdb8b0119df3a19c65fde413a13b34997"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/prometheus/internal",
+			"Comment": "v0.9.1",
+			"Rev": "abad2d1bd44235a26707c172eab6bca5bf2dbad3"
 		}
 	]
 }
@@ -17,8 +17,8 @@ package http
 import (
 	"fmt"
 	"net/http"
-	"os"
 
+	auth "github.com/abbot/go-http-auth"
 	"github.com/google/cadvisor/api"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/healthz"
@@ -28,8 +28,6 @@ import (
 	"github.com/google/cadvisor/pages"
 	"github.com/google/cadvisor/pages/static"
 	"github.com/google/cadvisor/validate"
-
-	auth "github.com/abbot/go-http-auth"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"k8s.io/klog"
@@ -100,7 +98,7 @@ func RegisterPrometheusHandler(mux httpmux.Mux, containerManager manager.Manager
 	r.MustRegister(
 		metrics.NewPrometheusCollector(containerManager, f, includedMetrics),
 		prometheus.NewGoCollector(),
-		prometheus.NewProcessCollector(os.Getpid(), ""),
+		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
 	)
 	mux.Handle(prometheusEndpoint, promhttp.HandlerFor(r, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))
 }
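For context on the handler-side change above: the v0.9.x client_golang vendored by this PR constructs the process collector from a ProcessCollectorOpts struct instead of the old (pid, namespace) arguments. Not part of the diff: a small, self-contained sketch of the same registration pattern (custom registry, Go and process collectors, ContinueOnError handler); the listen address is illustrative.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	r := prometheus.NewRegistry()
	r.MustRegister(
		prometheus.NewGoCollector(),
		// v0.9+ signature: an options struct instead of (pid int, namespace string).
		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
	)
	// ContinueOnError keeps serving whatever metrics could be collected even if
	// one collector fails, matching the handler options used in the diff above.
	http.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))
	_ = http.ListenAndServe(":8080", nil)
}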
@@ -21,7 +21,6 @@ import (
 
 	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"
-
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/klog"
 )
@@ -40,8 +39,9 @@ type infoProvider interface {
 // metricValue describes a single metric value for a given set of label values
 // within a parent containerMetric.
 type metricValue struct {
 	value  float64
 	labels []string
+	timestamp time.Time
 }
 
 type metricValues []metricValue
@@ -55,30 +55,35 @@ func asNanosecondsToSeconds(v uint64) float64 {
 }
 
 // fsValues is a helper method for assembling per-filesystem stats.
-func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
+func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64, timestamp time.Time) metricValues {
 	values := make(metricValues, 0, len(fsStats))
 	for _, stat := range fsStats {
 		values = append(values, metricValue{
 			value:  valueFn(&stat),
 			labels: []string{stat.Device},
+			timestamp: timestamp,
 		})
 	}
 	return values
 }
 
 // ioValues is a helper method for assembling per-disk and per-filesystem stats.
-func ioValues(ioStats []info.PerDiskStats, ioType string, ioValueFn func(uint64) float64, fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
+func ioValues(ioStats []info.PerDiskStats, ioType string, ioValueFn func(uint64) float64,
+	fsStats []info.FsStats, valueFn func(*info.FsStats) float64, timestamp time.Time) metricValues {
+
 	values := make(metricValues, 0, len(ioStats)+len(fsStats))
 	for _, stat := range ioStats {
 		values = append(values, metricValue{
 			value:  ioValueFn(stat.Stats[ioType]),
 			labels: []string{stat.Device},
+			timestamp: timestamp,
 		})
 	}
 	for _, stat := range fsStats {
 		values = append(values, metricValue{
 			value:  valueFn(&stat),
 			labels: []string{stat.Device},
+			timestamp: timestamp,
 		})
 	}
 	return values
@@ -147,14 +152,24 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Cumulative user cpu time consumed in seconds.",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.Usage.User) / float64(time.Second)}}
+				return metricValues{
+					{
+						value:     float64(s.Cpu.Usage.User) / float64(time.Second),
+						timestamp: s.Timestamp,
+					},
+				}
 			},
 		}, {
 			name:      "container_cpu_system_seconds_total",
 			help:      "Cumulative system cpu time consumed in seconds.",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.Usage.System) / float64(time.Second)}}
+				return metricValues{
+					{
+						value:     float64(s.Cpu.Usage.System) / float64(time.Second),
+						timestamp: s.Timestamp,
+					},
+				}
 			},
 		}, {
 			name:      "container_cpu_usage_seconds_total",
@@ -165,8 +180,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				if len(s.Cpu.Usage.PerCpu) == 0 {
 					if s.Cpu.Usage.Total > 0 {
 						return metricValues{{
 							value:  float64(s.Cpu.Usage.Total) / float64(time.Second),
 							labels: []string{"total"},
+							timestamp: s.Timestamp,
 						}}
 					}
 				}
@@ -174,8 +190,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				for i, value := range s.Cpu.Usage.PerCpu {
 					if value > 0 {
 						values = append(values, metricValue{
 							value:  float64(value) / float64(time.Second),
 							labels: []string{fmt.Sprintf("cpu%02d", i)},
+							timestamp: s.Timestamp,
 						})
 					}
 				}
@@ -187,7 +204,11 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			valueType: prometheus.CounterValue,
 			condition: func(s info.ContainerSpec) bool { return s.Cpu.Quota != 0 },
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.CFS.Periods)}}
+				return metricValues{
+					{
+						value:     float64(s.Cpu.CFS.Periods),
+						timestamp: s.Timestamp,
+					}}
 			},
 		}, {
 			name: "container_cpu_cfs_throttled_periods_total",
@@ -195,7 +216,11 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			valueType: prometheus.CounterValue,
 			condition: func(s info.ContainerSpec) bool { return s.Cpu.Quota != 0 },
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.CFS.ThrottledPeriods)}}
+				return metricValues{
+					{
+						value:     float64(s.Cpu.CFS.ThrottledPeriods),
+						timestamp: s.Timestamp,
+					}}
 			},
 		}, {
 			name: "container_cpu_cfs_throttled_seconds_total",
@@ -203,7 +228,11 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			valueType: prometheus.CounterValue,
 			condition: func(s info.ContainerSpec) bool { return s.Cpu.Quota != 0 },
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second)}}
+				return metricValues{
+					{
+						value:     float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second),
+						timestamp: s.Timestamp,
+					}}
 			},
 		},
 	}...)
@@ -215,21 +244,30 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Time duration the processes of the container have run on the CPU.",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.Schedstat.RunTime) / float64(time.Second)}}
+				return metricValues{{
+					value:     float64(s.Cpu.Schedstat.RunTime) / float64(time.Second),
+					timestamp: s.Timestamp,
+				}}
 			},
 		}, {
 			name:      "container_cpu_schedstat_runqueue_seconds_total",
 			help:      "Time duration processes of the container have been waiting on a runqueue.",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.Schedstat.RunqueueTime) / float64(time.Second)}}
+				return metricValues{{
+					value:     float64(s.Cpu.Schedstat.RunqueueTime) / float64(time.Second),
+					timestamp: s.Timestamp,
+				}}
 			},
 		}, {
 			name:      "container_cpu_schedstat_run_periods_total",
 			help:      "Number of times processes of the cgroup have run on the cpu",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.Schedstat.RunPeriods)}}
+				return metricValues{{
+					value:     float64(s.Cpu.Schedstat.RunPeriods),
+					timestamp: s.Timestamp,
+				}}
 			},
 		},
 	}...)
@@ -241,7 +279,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Value of container cpu load average over the last 10 seconds.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Cpu.LoadAverage)}}
+				return metricValues{{value: float64(s.Cpu.LoadAverage), timestamp: s.Timestamp}}
 			},
 		}, {
 			name: "container_tasks_state",
@@ -251,24 +289,29 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.TaskStats.NrSleeping),
 						labels: []string{"sleeping"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.TaskStats.NrRunning),
 						labels: []string{"running"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.TaskStats.NrStopped),
 						labels: []string{"stopped"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.TaskStats.NrUninterruptible),
 						labels: []string{"uninterruptible"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.TaskStats.NrIoWait),
 						labels: []string{"iowaiting"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -282,42 +325,45 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Number of bytes of page cache memory.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.Cache)}}
+				return metricValues{{value: float64(s.Memory.Cache), timestamp: s.Timestamp}}
 			},
 		}, {
 			name:      "container_memory_rss",
 			help:      "Size of RSS in bytes.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.RSS)}}
+				return metricValues{{value: float64(s.Memory.RSS), timestamp: s.Timestamp}}
 			},
 		}, {
 			name:      "container_memory_mapped_file",
 			help:      "Size of memory mapped files in bytes.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.MappedFile)}}
+				return metricValues{{value: float64(s.Memory.MappedFile), timestamp: s.Timestamp}}
 			},
 		}, {
 			name:      "container_memory_swap",
 			help:      "Container swap usage in bytes.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.Swap)}}
+				return metricValues{{value: float64(s.Memory.Swap), timestamp: s.Timestamp}}
 			},
 		}, {
 			name:      "container_memory_failcnt",
 			help:      "Number of memory usage hits limits",
 			valueType: prometheus.CounterValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.Failcnt)}}
+				return metricValues{{
+					value:     float64(s.Memory.Failcnt),
+					timestamp: s.Timestamp,
+				}}
 			},
 		}, {
 			name:      "container_memory_usage_bytes",
 			help:      "Current memory usage in bytes, including all memory regardless of when it was accessed",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.Usage)}}
+				return metricValues{{value: float64(s.Memory.Usage), timestamp: s.Timestamp}}
 			},
 		},
 		{
@@ -325,14 +371,14 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Maximum memory usage recorded in bytes",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.MaxUsage)}}
+				return metricValues{{value: float64(s.Memory.MaxUsage), timestamp: s.Timestamp}}
 			},
 		}, {
 			name:      "container_memory_working_set_bytes",
 			help:      "Current working set in bytes.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Memory.WorkingSet)}}
+				return metricValues{{value: float64(s.Memory.WorkingSet), timestamp: s.Timestamp}}
 			},
 		}, {
 			name: "container_memory_failures_total",
@@ -342,20 +388,24 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.Memory.ContainerData.Pgfault),
 						labels: []string{"pgfault", "container"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Memory.ContainerData.Pgmajfault),
 						labels: []string{"pgmajfault", "container"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Memory.HierarchicalData.Pgfault),
 						labels: []string{"pgfault", "hierarchy"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Memory.HierarchicalData.Pgmajfault),
 						labels: []string{"pgmajfault", "hierarchy"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -373,8 +423,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Accelerators))
 				for _, value := range s.Accelerators {
 					values = append(values, metricValue{
 						value:  float64(value.MemoryTotal),
 						labels: []string{value.Make, value.Model, value.ID},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -388,8 +439,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Accelerators))
 				for _, value := range s.Accelerators {
 					values = append(values, metricValue{
 						value:  float64(value.MemoryUsed),
 						labels: []string{value.Make, value.Model, value.ID},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -403,8 +455,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Accelerators))
 				for _, value := range s.Accelerators {
 					values = append(values, metricValue{
 						value:  float64(value.DutyCycle),
 						labels: []string{value.Make, value.Model, value.ID},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -422,7 +475,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
 					return float64(fs.InodesFree)
-				})
+				}, s.Timestamp)
 			},
 		}, {
 			name: "container_fs_inodes_total",
@@ -432,7 +485,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
 					return float64(fs.Inodes)
-				})
+				}, s.Timestamp)
 			},
 		}, {
 			name: "container_fs_limit_bytes",
@@ -442,7 +495,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
 					return float64(fs.Limit)
-				})
+				}, s.Timestamp)
 			},
 		}, {
 			name: "container_fs_usage_bytes",
@@ -452,7 +505,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
 					return float64(fs.Usage)
-				})
+				}, s.Timestamp)
 			},
 		},
 	}...)
@@ -468,6 +521,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				return ioValues(
 					s.DiskIo.IoServiceBytes, "Read", asFloat64,
 					nil, nil,
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -481,6 +535,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.ReadsCompleted)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -494,6 +549,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.SectorsRead)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -507,6 +563,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.ReadsMerged)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -520,6 +577,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.ReadTime) / float64(time.Second)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -531,6 +589,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				return ioValues(
 					s.DiskIo.IoServiceBytes, "Write", asFloat64,
 					nil, nil,
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -544,6 +603,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.WritesCompleted)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -557,6 +617,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.SectorsWritten)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -570,6 +631,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.WritesMerged)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -583,6 +645,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.WriteTime) / float64(time.Second)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -596,6 +659,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(fs.IoInProgress)
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -609,6 +673,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					s.Filesystem, func(fs *info.FsStats) float64 {
 						return float64(float64(fs.IoTime) / float64(time.Second))
 					},
+					s.Timestamp,
 				)
 			},
 		}, {
@@ -619,7 +684,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
 					return float64(fs.WeightedIoTime) / float64(time.Second)
-				})
+				}, s.Timestamp)
 			},
 		},
 	}...)
@@ -635,8 +700,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.RxBytes),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -650,8 +716,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.RxPackets),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -665,8 +732,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.RxDropped),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -680,8 +748,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.RxErrors),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -695,8 +764,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.TxBytes),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -710,8 +780,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.TxPackets),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -725,8 +796,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.TxDropped),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -740,8 +812,9 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 				values := make(metricValues, 0, len(s.Network.Interfaces))
 				for _, value := range s.Network.Interfaces {
 					values = append(values, metricValue{
 						value:  float64(value.TxErrors),
 						labels: []string{value.Name},
+						timestamp: s.Timestamp,
 					})
 				}
 				return values
@@ -759,48 +832,59 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.Network.Tcp.Established),
 						labels: []string{"established"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.SynSent),
 						labels: []string{"synsent"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.SynRecv),
 						labels: []string{"synrecv"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.FinWait1),
 						labels: []string{"finwait1"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.FinWait2),
 						labels: []string{"finwait2"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.TimeWait),
 						labels: []string{"timewait"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.Close),
 						labels: []string{"close"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.CloseWait),
 						labels: []string{"closewait"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.LastAck),
 						labels: []string{"lastack"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.Listen),
 						labels: []string{"listen"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp.Closing),
 						labels: []string{"closing"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -815,48 +899,59 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.Network.Tcp6.Established),
 						labels: []string{"established"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.SynSent),
 						labels: []string{"synsent"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.SynRecv),
 						labels: []string{"synrecv"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.FinWait1),
 						labels: []string{"finwait1"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.FinWait2),
 						labels: []string{"finwait2"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.TimeWait),
 						labels: []string{"timewait"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.Close),
 						labels: []string{"close"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.CloseWait),
 						labels: []string{"closewait"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.LastAck),
 						labels: []string{"lastack"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.Listen),
 						labels: []string{"listen"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Tcp6.Closing),
 						labels: []string{"closing"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -873,20 +968,24 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.Network.Udp6.Listen),
 						labels: []string{"listen"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp6.Dropped),
 						labels: []string{"dropped"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp6.RxQueued),
 						labels: []string{"rxqueued"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp6.TxQueued),
 						labels: []string{"txqueued"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -901,20 +1000,24 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{
 					{
 						value:  float64(s.Network.Udp.Listen),
 						labels: []string{"listen"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp.Dropped),
 						labels: []string{"dropped"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp.RxQueued),
 						labels: []string{"rxqueued"},
+						timestamp: s.Timestamp,
 					},
 					{
 						value:  float64(s.Network.Udp.TxQueued),
 						labels: []string{"txqueued"},
+						timestamp: s.Timestamp,
 					},
 				}
 			},
@@ -928,7 +1031,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Number of processes running inside the container.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Processes.ProcessCount)}}
+				return metricValues{{value: float64(s.Processes.ProcessCount), timestamp: s.Timestamp}}
 			},
 		},
 		{
@@ -936,7 +1039,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 			help:      "Number of open file descriptors for the container.",
 			valueType: prometheus.GaugeValue,
 			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{{value: float64(s.Processes.FdCount)}}
+				return metricValues{{value: float64(s.Processes.FdCount), timestamp: s.Timestamp}}
 			},
 		},
 	}...)
@@ -1089,7 +1192,10 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
 			}
 			desc := cm.desc(labels)
 			for _, metricValue := range cm.getValues(stats) {
-				ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append(values, metricValue.labels...)...)
+				ch <- prometheus.NewMetricWithTimestamp(
+					metricValue.timestamp,
+					prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append(values, metricValue.labels...)...),
+				)
 			}
 		}
 	}
@@ -26,7 +26,6 @@ import (
 
 	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"
-
 	"github.com/prometheus/client_golang/prometheus"
 )
 
@@ -94,6 +93,7 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 		},
 		Stats: []*info.ContainerStats{
 			{
+				Timestamp: time.Unix(1395066363, 0),
 				Cpu: info.CpuStats{
 					Usage: info.CpuUsage{
 						Total: 1,
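The fixture timestamp added above is what produces the trailing timestamps in the golden file below: Prometheus exposition timestamps are milliseconds since the Unix epoch, so time.Unix(1395066363, 0) appears as 1395066363000 after each sample value. A tiny sketch of the conversion (illustrative only, not part of the diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Unix(1395066363, 0)
	// Exposition-format timestamps are expressed in milliseconds since the epoch.
	fmt.Println(ts.UnixNano() / int64(time.Millisecond)) // 1395066363000
}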
metrics/testdata/prometheus_metrics (vendored; 235 changed lines)

@@ -3,210 +3,210 @@
 cadvisor_version_info{cadvisorRevision="abcdef",cadvisorVersion="0.16.0",dockerVersion="1.8.1",kernelVersion="4.1.6-200.fc22.x86_64",osVersion="Fedora 22 (Twenty Two)"} 1
 # HELP container_accelerator_duty_cycle Percent of time over the past sample period during which the accelerator was actively processing.
 # TYPE container_accelerator_duty_cycle gauge
-container_accelerator_duty_cycle{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 6
-container_accelerator_duty_cycle{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 12
+container_accelerator_duty_cycle{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 6 1395066363000
+container_accelerator_duty_cycle{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 12 1395066363000
 # HELP container_accelerator_memory_total_bytes Total accelerator memory.
 # TYPE container_accelerator_memory_total_bytes gauge
-container_accelerator_memory_total_bytes{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 1.0203040506e+10
-container_accelerator_memory_total_bytes{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 2.0304050607e+10
+container_accelerator_memory_total_bytes{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 1.0203040506e+10 1395066363000
+container_accelerator_memory_total_bytes{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 2.0304050607e+10 1395066363000
 # HELP container_accelerator_memory_used_bytes Total accelerator memory allocated.
 # TYPE container_accelerator_memory_used_bytes gauge
-container_accelerator_memory_used_bytes{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 1.02030405e+09
-container_accelerator_memory_used_bytes{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 2.03040506e+09
+container_accelerator_memory_used_bytes{acc_id="GPU-deadbeef-0123-4567-89ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-k80",name="testcontaineralias",zone_name="hello"} 1.02030405e+09 1395066363000
+container_accelerator_memory_used_bytes{acc_id="GPU-deadbeef-1234-5678-90ab-feedfacecafe",container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",make="nvidia",model="tesla-p100",name="testcontaineralias",zone_name="hello"} 2.03040506e+09 1395066363000
 # HELP container_cpu_cfs_periods_total Number of elapsed enforcement period intervals.
 # TYPE container_cpu_cfs_periods_total counter
-container_cpu_cfs_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 723
+container_cpu_cfs_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 723 1395066363000
 # HELP container_cpu_cfs_throttled_periods_total Number of throttled period intervals.
 # TYPE container_cpu_cfs_throttled_periods_total counter
-container_cpu_cfs_throttled_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 18
+container_cpu_cfs_throttled_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 18 1395066363000
 # HELP container_cpu_cfs_throttled_seconds_total Total time duration the container has been throttled.
 # TYPE container_cpu_cfs_throttled_seconds_total counter
-container_cpu_cfs_throttled_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.724314
+container_cpu_cfs_throttled_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.724314 1395066363000
 # HELP container_cpu_load_average_10s Value of container cpu load average over the last 10 seconds.
 # TYPE container_cpu_load_average_10s gauge
-container_cpu_load_average_10s{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2
+container_cpu_load_average_10s{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2 1395066363000
 # HELP container_cpu_schedstat_run_periods_total Number of times processes of the cgroup have run on the cpu
 # TYPE container_cpu_schedstat_run_periods_total counter
-container_cpu_schedstat_run_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 984285
+container_cpu_schedstat_run_periods_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 984285 1395066363000
 # HELP container_cpu_schedstat_run_seconds_total Time duration the processes of the container have run on the CPU.
 # TYPE container_cpu_schedstat_run_seconds_total counter
-container_cpu_schedstat_run_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 0.053643567
+container_cpu_schedstat_run_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 0.053643567 1395066363000
 # HELP container_cpu_schedstat_runqueue_seconds_total Time duration processes of the container have been waiting on a runqueue.
 # TYPE container_cpu_schedstat_runqueue_seconds_total counter
-container_cpu_schedstat_runqueue_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 479.424566378
+container_cpu_schedstat_runqueue_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 479.424566378 1395066363000
 # HELP container_cpu_system_seconds_total Cumulative system cpu time consumed in seconds.
 # TYPE container_cpu_system_seconds_total counter
-container_cpu_system_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 7e-09
+container_cpu_system_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 7e-09 1395066363000
 # HELP container_cpu_usage_seconds_total Cumulative cpu time consumed in seconds.
 # TYPE container_cpu_usage_seconds_total counter
-container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu00",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2e-09
-container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu01",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 3e-09
+container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu00",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2e-09 1395066363000
+container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu01",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 3e-09 1395066363000
|
||||||
container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu02",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4e-09
|
container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu02",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4e-09 1395066363000
|
||||||
container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu03",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5e-09
|
container_cpu_usage_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",cpu="cpu03",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5e-09 1395066363000
|
||||||
# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
|
# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
|
||||||
# TYPE container_cpu_user_seconds_total counter
|
# TYPE container_cpu_user_seconds_total counter
|
||||||
container_cpu_user_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 6e-09
|
container_cpu_user_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 6e-09 1395066363000
|
||||||
# HELP container_file_descriptors Number of open file descriptors for the container.
# TYPE container_file_descriptors gauge
-container_file_descriptors{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5
+container_file_descriptors{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5 1395066363000
# HELP container_fs_inodes_free Number of available Inodes
# TYPE container_fs_inodes_free gauge
-container_fs_inodes_free{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 524288
+container_fs_inodes_free{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 524288 1395066363000
-container_fs_inodes_free{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 262144
+container_fs_inodes_free{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 262144 1395066363000
# HELP container_fs_inodes_total Number of Inodes
# TYPE container_fs_inodes_total gauge
-container_fs_inodes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.097152e+06
+container_fs_inodes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.097152e+06 1395066363000
-container_fs_inodes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.097152e+06
+container_fs_inodes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.097152e+06 1395066363000
# HELP container_fs_io_current Number of I/Os currently in progress
# TYPE container_fs_io_current gauge
-container_fs_io_current{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 42
+container_fs_io_current{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 42 1395066363000
-container_fs_io_current{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 47
+container_fs_io_current{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 47 1395066363000
# HELP container_fs_io_time_seconds_total Cumulative count of seconds spent doing I/Os
# TYPE container_fs_io_time_seconds_total counter
-container_fs_io_time_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.3e-08
+container_fs_io_time_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.3e-08 1395066363000
-container_fs_io_time_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.8e-08
+container_fs_io_time_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.8e-08 1395066363000
# HELP container_fs_io_time_weighted_seconds_total Cumulative weighted I/O time in seconds
# TYPE container_fs_io_time_weighted_seconds_total counter
-container_fs_io_time_weighted_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.4e-08
+container_fs_io_time_weighted_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.4e-08 1395066363000
-container_fs_io_time_weighted_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.9e-08
+container_fs_io_time_weighted_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.9e-08 1395066363000
# HELP container_fs_limit_bytes Number of bytes that can be consumed by the container on this filesystem.
# TYPE container_fs_limit_bytes gauge
-container_fs_limit_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 22
+container_fs_limit_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 22 1395066363000
-container_fs_limit_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 37
+container_fs_limit_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 37 1395066363000
# HELP container_fs_read_seconds_total Cumulative count of seconds spent reading
# TYPE container_fs_read_seconds_total counter
-container_fs_read_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.7e-08
+container_fs_read_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 2.7e-08 1395066363000
-container_fs_read_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.2e-08
+container_fs_read_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.2e-08 1395066363000
# HELP container_fs_reads_merged_total Cumulative count of reads merged
# TYPE container_fs_reads_merged_total counter
-container_fs_reads_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 25
+container_fs_reads_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 25 1395066363000
-container_fs_reads_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 40
+container_fs_reads_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 40 1395066363000
# HELP container_fs_reads_total Cumulative count of reads completed
# TYPE container_fs_reads_total counter
-container_fs_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 24
+container_fs_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 24 1395066363000
-container_fs_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 39
+container_fs_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 39 1395066363000
# HELP container_fs_sector_reads_total Cumulative count of sector reads completed
# TYPE container_fs_sector_reads_total counter
-container_fs_sector_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 26
+container_fs_sector_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 26 1395066363000
-container_fs_sector_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 41
+container_fs_sector_reads_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 41 1395066363000
# HELP container_fs_sector_writes_total Cumulative count of sector writes completed
# TYPE container_fs_sector_writes_total counter
-container_fs_sector_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 40
+container_fs_sector_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 40 1395066363000
-container_fs_sector_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 45
+container_fs_sector_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 45 1395066363000
# HELP container_fs_usage_bytes Number of bytes that are consumed by the container on this filesystem.
# TYPE container_fs_usage_bytes gauge
-container_fs_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 23
+container_fs_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 23 1395066363000
-container_fs_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 38
+container_fs_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 38 1395066363000
# HELP container_fs_write_seconds_total Cumulative count of seconds spent writing
# TYPE container_fs_write_seconds_total counter
-container_fs_write_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.1e-08
+container_fs_write_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.1e-08 1395066363000
-container_fs_write_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.6e-08
+container_fs_write_seconds_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 4.6e-08 1395066363000
# HELP container_fs_writes_merged_total Cumulative count of writes merged
# TYPE container_fs_writes_merged_total counter
-container_fs_writes_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 39
+container_fs_writes_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 39 1395066363000
-container_fs_writes_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 44
+container_fs_writes_merged_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 44 1395066363000
# HELP container_fs_writes_total Cumulative count of writes completed
# TYPE container_fs_writes_total counter
-container_fs_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 28
+container_fs_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda1",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 28 1395066363000
-container_fs_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 43
+container_fs_writes_total{container_env_foo_env="prod",container_label_foo_label="bar",device="sda2",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 43 1395066363000
# HELP container_last_seen Last time a container was seen by the exporter
# TYPE container_last_seen gauge
-container_last_seen{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.426203694e+09
+container_last_seen{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.426203694e+09 1395066363000
# HELP container_memory_cache Number of bytes of page cache memory.
# TYPE container_memory_cache gauge
-container_memory_cache{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 14
+container_memory_cache{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 14 1395066363000
# HELP container_memory_failcnt Number of memory usage hits limits
# TYPE container_memory_failcnt counter
-container_memory_failcnt{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 0
+container_memory_failcnt{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 0 1395066363000
# HELP container_memory_failures_total Cumulative count of memory allocation failures.
# TYPE container_memory_failures_total counter
-container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgfault",id="testcontainer",image="test",name="testcontaineralias",scope="container",zone_name="hello"} 10
+container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgfault",id="testcontainer",image="test",name="testcontaineralias",scope="container",zone_name="hello"} 10 1395066363000
-container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgfault",id="testcontainer",image="test",name="testcontaineralias",scope="hierarchy",zone_name="hello"} 12
+container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgfault",id="testcontainer",image="test",name="testcontaineralias",scope="hierarchy",zone_name="hello"} 12 1395066363000
-container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgmajfault",id="testcontainer",image="test",name="testcontaineralias",scope="container",zone_name="hello"} 11
+container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgmajfault",id="testcontainer",image="test",name="testcontaineralias",scope="container",zone_name="hello"} 11 1395066363000
-container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgmajfault",id="testcontainer",image="test",name="testcontaineralias",scope="hierarchy",zone_name="hello"} 13
+container_memory_failures_total{container_env_foo_env="prod",container_label_foo_label="bar",failure_type="pgmajfault",id="testcontainer",image="test",name="testcontaineralias",scope="hierarchy",zone_name="hello"} 13 1395066363000
# HELP container_memory_mapped_file Size of memory mapped files in bytes.
# TYPE container_memory_mapped_file gauge
-container_memory_mapped_file{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 16
+container_memory_mapped_file{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 16 1395066363000
# HELP container_memory_max_usage_bytes Maximum memory usage recorded in bytes
# TYPE container_memory_max_usage_bytes gauge
-container_memory_max_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8
+container_memory_max_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8 1395066363000
# HELP container_memory_rss Size of RSS in bytes.
# TYPE container_memory_rss gauge
-container_memory_rss{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 15
+container_memory_rss{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 15 1395066363000
# HELP container_memory_swap Container swap usage in bytes.
# TYPE container_memory_swap gauge
-container_memory_swap{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8192
+container_memory_swap{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8192 1395066363000
# HELP container_memory_usage_bytes Current memory usage in bytes, including all memory regardless of when it was accessed
# TYPE container_memory_usage_bytes gauge
-container_memory_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8
+container_memory_usage_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 8 1395066363000
# HELP container_memory_working_set_bytes Current working set in bytes.
# TYPE container_memory_working_set_bytes gauge
-container_memory_working_set_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 9
+container_memory_working_set_bytes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 9 1395066363000
# HELP container_network_receive_bytes_total Cumulative count of bytes received
# TYPE container_network_receive_bytes_total counter
-container_network_receive_bytes_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 14
+container_network_receive_bytes_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 14 1395066363000
# HELP container_network_receive_errors_total Cumulative count of errors encountered while receiving
# TYPE container_network_receive_errors_total counter
-container_network_receive_errors_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 16
+container_network_receive_errors_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 16 1395066363000
# HELP container_network_receive_packets_dropped_total Cumulative count of packets dropped while receiving
# TYPE container_network_receive_packets_dropped_total counter
-container_network_receive_packets_dropped_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 17
+container_network_receive_packets_dropped_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 17 1395066363000
# HELP container_network_receive_packets_total Cumulative count of packets received
# TYPE container_network_receive_packets_total counter
-container_network_receive_packets_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 15
+container_network_receive_packets_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 15 1395066363000
# HELP container_network_tcp6_usage_total tcp6 connection usage statistic for container
# TYPE container_network_tcp6_usage_total gauge
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="close",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="close",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closewait",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closewait",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closing",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closing",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="established",zone_name="hello"} 11
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="established",zone_name="hello"} 11 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait1",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait1",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait2",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait2",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="lastack",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="lastack",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="listen",zone_name="hello"} 3
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="listen",zone_name="hello"} 3 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synrecv",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synrecv",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synsent",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synsent",zone_name="hello"} 0 1395066363000
-container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="timewait",zone_name="hello"} 0
+container_network_tcp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="timewait",zone_name="hello"} 0 1395066363000
# HELP container_network_tcp_usage_total tcp connection usage statistic for container
# TYPE container_network_tcp_usage_total gauge
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="close",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="close",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closewait",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closewait",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closing",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="closing",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="established",zone_name="hello"} 13
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="established",zone_name="hello"} 13 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait1",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait1",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait2",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="finwait2",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="lastack",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="lastack",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="listen",zone_name="hello"} 3
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="listen",zone_name="hello"} 3 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synrecv",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synrecv",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synsent",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="synsent",zone_name="hello"} 0 1395066363000
-container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="timewait",zone_name="hello"} 0
+container_network_tcp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",tcp_state="timewait",zone_name="hello"} 0 1395066363000
# HELP container_network_transmit_bytes_total Cumulative count of bytes transmitted
# TYPE container_network_transmit_bytes_total counter
-container_network_transmit_bytes_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 18
+container_network_transmit_bytes_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 18 1395066363000
# HELP container_network_transmit_errors_total Cumulative count of errors encountered while transmitting
# TYPE container_network_transmit_errors_total counter
-container_network_transmit_errors_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 20
+container_network_transmit_errors_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 20 1395066363000
# HELP container_network_transmit_packets_dropped_total Cumulative count of packets dropped while transmitting
# TYPE container_network_transmit_packets_dropped_total counter
-container_network_transmit_packets_dropped_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 21
+container_network_transmit_packets_dropped_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 21 1395066363000
# HELP container_network_transmit_packets_total Cumulative count of packets transmitted
# TYPE container_network_transmit_packets_total counter
-container_network_transmit_packets_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 19
+container_network_transmit_packets_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",interface="eth0",name="testcontaineralias",zone_name="hello"} 19 1395066363000
# HELP container_network_udp6_usage_total udp6 connection usage statistic for container
# TYPE container_network_udp6_usage_total gauge
-container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="dropped",zone_name="hello"} 0
+container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="dropped",zone_name="hello"} 0 1395066363000
-container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="listen",zone_name="hello"} 0
+container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="listen",zone_name="hello"} 0 1395066363000
-container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="rxqueued",zone_name="hello"} 0
+container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="rxqueued",zone_name="hello"} 0 1395066363000
-container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="txqueued",zone_name="hello"} 0
+container_network_udp6_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="txqueued",zone_name="hello"} 0 1395066363000
# HELP container_network_udp_usage_total udp connection usage statistic for container
# TYPE container_network_udp_usage_total gauge
-container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="dropped",zone_name="hello"} 0
+container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="dropped",zone_name="hello"} 0 1395066363000
-container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="listen",zone_name="hello"} 0
+container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="listen",zone_name="hello"} 0 1395066363000
-container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="rxqueued",zone_name="hello"} 0
+container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="rxqueued",zone_name="hello"} 0 1395066363000
-container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="txqueued",zone_name="hello"} 0
+container_network_udp_usage_total{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",udp_state="txqueued",zone_name="hello"} 0 1395066363000
# HELP container_processes Number of processes running inside the container.
# TYPE container_processes gauge
-container_processes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1
+container_processes{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1 1395066363000
# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
# TYPE container_scrape_error gauge
container_scrape_error 0
@ -224,32 +224,11 @@ container_spec_cpu_shares{container_env_foo_env="prod",container_label_foo_label
container_start_time_seconds{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.257894e+09
# HELP container_tasks_state Number of tasks in given state
# TYPE container_tasks_state gauge
-container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="iowaiting",zone_name="hello"} 54
+container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="iowaiting",zone_name="hello"} 54 1395066363000
-container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="running",zone_name="hello"} 51
+container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="running",zone_name="hello"} 51 1395066363000
-container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="sleeping",zone_name="hello"} 50
+container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="sleeping",zone_name="hello"} 50 1395066363000
-container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="stopped",zone_name="hello"} 52
+container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="stopped",zone_name="hello"} 52 1395066363000
-container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="uninterruptible",zone_name="hello"} 53
+container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="uninterruptible",zone_name="hello"} 53 1395066363000
-# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
-# TYPE http_request_duration_microseconds summary
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 0
-http_request_duration_microseconds_sum{handler="prometheus"} 0
-http_request_duration_microseconds_count{handler="prometheus"} 0
-# HELP http_request_size_bytes The HTTP request sizes in bytes.
-# TYPE http_request_size_bytes summary
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_request_size_bytes_sum{handler="prometheus"} 0
-http_request_size_bytes_count{handler="prometheus"} 0
-# HELP http_response_size_bytes The HTTP response sizes in bytes.
-# TYPE http_response_size_bytes summary
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_response_size_bytes_sum{handler="prometheus"} 0
-http_response_size_bytes_count{handler="prometheus"} 0
# HELP machine_cpu_cores Number of CPU cores on the machine.
# TYPE machine_cpu_cores gauge
machine_cpu_cores 4
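Every container sample in the golden file above now carries an explicit millisecond timestamp (1395066363000), which is the behaviour this PR adds, while the removed http_request_* summaries reflect the client_golang v0.9.1 upgrade. As a rough, illustrative sketch only (the statsCollector type and metric name below are invented, not cAdvisor's actual code; only the prometheus package APIs are real), a collector built against client_golang v0.9.x can attach such a timestamp with prometheus.NewMetricWithTimestamp so the sample is reported at the time the stats were collected rather than at scrape time:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// statsCollector is a hypothetical collector that stamps every sample with
// the time at which the underlying container stats were actually read.
type statsCollector struct {
	lastSeenDesc *prometheus.Desc
}

func (c *statsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.lastSeenDesc
}

func (c *statsCollector) Collect(ch chan<- prometheus.Metric) {
	// Pretend this is when the stats snapshot was taken (it matches the
	// 1395066363000 ms timestamps in the golden file above).
	statsTime := time.Unix(1395066363, 0)
	m := prometheus.MustNewConstMetric(c.lastSeenDesc, prometheus.GaugeValue, 1.426203694e+09)
	// NewMetricWithTimestamp (client_golang >= v0.9) wraps the metric so the
	// text exposition shows "<value> 1395066363000" instead of omitting the timestamp.
	ch <- prometheus.NewMetricWithTimestamp(statsTime, m)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&statsCollector{
		lastSeenDesc: prometheus.NewDesc("example_container_last_seen", "Illustrative gauge.", nil, nil),
	})
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.String()) // the gathered metric carries timestamp_ms: 1395066363000
	}
}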
73
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
@@ -29,27 +29,72 @@ type Collector interface {
 	// collected by this Collector to the provided channel and returns once
 	// the last descriptor has been sent. The sent descriptors fulfill the
 	// consistency and uniqueness requirements described in the Desc
-	// documentation. (It is valid if one and the same Collector sends
-	// duplicate descriptors. Those duplicates are simply ignored. However,
-	// two different Collectors must not send duplicate descriptors.) This
-	// method idempotently sends the same descriptors throughout the
-	// lifetime of the Collector. If a Collector encounters an error while
-	// executing this method, it must send an invalid descriptor (created
-	// with NewInvalidDesc) to signal the error to the registry.
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
 	Describe(chan<- *Desc)
 	// Collect is called by the Prometheus registry when collecting
 	// metrics. The implementation sends each collected metric via the
 	// provided channel and returns once the last metric has been sent. The
-	// descriptor of each sent metric is one of those returned by
-	// Describe. Returned metrics that share the same descriptor must differ
-	// in their variable label values. This method may be called
-	// concurrently and must therefore be implemented in a concurrency safe
-	// way. Blocking occurs at the expense of total performance of rendering
-	// all registered metrics. Ideally, Collector implementations support
-	// concurrent readers.
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
 	Collect(chan<- Metric)
 }
 
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//	func (c customCollector) Describe(ch chan<- *Desc) {
+//		DescribeByCollect(c, ch)
+//	}
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collecter (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
+
 // selfCollector implements Collector for a single Metric so that the Metric
 // collects itself. Add it as an anonymous field to a struct that implements
 // Metric, and call init with the Metric itself as an argument.
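For context, the DescribeByCollect helper added above lets a custom Collector derive its descriptors from its own Collect output. The following is a minimal sketch, not part of this commit; the collector type and metric name are made up for illustration.

	package main

	import "github.com/prometheus/client_golang/prometheus"

	// queueCollector is a hypothetical collector whose metric set never
	// changes, so Describe can simply be delegated to DescribeByCollect.
	type queueCollector struct {
		depth *prometheus.Desc
	}

	func newQueueCollector() queueCollector {
		return queueCollector{
			depth: prometheus.NewDesc("demo_queue_depth", "Current queue depth.", nil, nil),
		}
	}

	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
		// Reuse Collect to emit the descriptors.
		prometheus.DescribeByCollect(c, ch)
	}

	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
		// A throw-away metric created at scrape time.
		ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42)
	}

	func main() {
		prometheus.MustRegister(newQueueCollector())
	}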
179  vendor/github.com/prometheus/client_golang/prometheus/counter.go  (generated, vendored)
@@ -15,6 +15,10 @@ package prometheus
 
 import (
 	"errors"
+	"math"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
 )
 
 // Counter is a Metric that represents a single numerical value that only ever
@@ -42,6 +46,14 @@ type Counter interface {
 type CounterOpts Opts
 
 // NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
 func NewCounter(opts CounterOpts) Counter {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -49,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
 	result.init(result) // Init self-collection.
 	return result
 }
 
 type counter struct {
-	value
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
 }
 
 func (c *counter) Add(v float64) {
 	if v < 0 {
 		panic(errors.New("counter cannot decrease in value"))
 	}
-	c.value.Add(v)
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	val := fval + float64(ival)
+
+	return populateMetric(CounterValue, val, c.labelPairs, out)
 }
 
 // CounterVec is a Collector that bundles a set of Counters that all share the
@@ -70,16 +120,12 @@ func (c *counter) Add(v float64) {
 // if you want to count the same thing partitioned by various dimensions
 // (e.g. number of HTTP requests, partitioned by response code and
 // method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
 type CounterVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -88,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 		opts.ConstLabels,
 	)
 	return &CounterVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			result := &counter{value: value{
-				desc:       desc,
-				valType:    CounterValue,
-				labelPairs: makeLabelPairs(desc, lvs),
-			}}
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			}
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.
 			return result
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
 		return metric.(Counter), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
 		return metric.(Counter), err
 	}
@@ -123,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
-	return m.MetricVec.WithLabelValues(lvs...).(Counter)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
 }
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
-	return m.MetricVec.With(labels).(Counter)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
 
 // CounterFunc is a Counter whose value is determined at collect time by calling a
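As a usage note: the renamed internals above leave the public CounterVec API intact, while CurryWith and MustCurryWith are new. A short illustrative sketch (metric name invented, assuming the prometheus package is imported):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		requests := prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "demo_http_requests_total",
				Help: "HTTP requests partitioned by handler and code.",
			},
			[]string{"handler", "code"},
		)
		prometheus.MustRegister(requests)

		// WithLabelValues panics on a label-count mismatch instead of
		// returning an error.
		requests.WithLabelValues("prometheus", "200").Inc()

		// MustCurryWith pre-sets one label; curried and uncurried vectors
		// share the same underlying metrics.
		byHandler := requests.MustCurryWith(prometheus.Labels{"handler": "prometheus"})
		byHandler.WithLabelValues("500").Add(3)
	}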
36  vendor/github.com/prometheus/client_golang/prometheus/desc.go  (generated, vendored)
@@ -25,19 +25,6 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
 // the immutable meta-data of a Metric. The normal Metric implementations
 // included in this package manage their Desc under the hood. Users only have to
@@ -80,24 +67,19 @@ type Desc struct {
 
 // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
 // and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
+// be nil if no such labels should be set. fqName must not be empty.
 //
 // variableLabels only contain the label names. Their label values are variable
 // and therefore not part of the Desc. (They are managed within the Metric.)
 //
 // For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
+// specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName:         fqName,
 		help:           help,
 		variableLabels: variableLabels,
 	}
-	if help == "" {
-		d.err = errors.New("empty help string")
-		return d
-	}
 	if !model.IsValidMetricName(model.LabelValue(fqName)) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
 		return d
@@ -122,6 +104,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	for _, labelName := range labelNames {
 		labelValues = append(labelValues, constLabels[labelName])
 	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use in len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
@@ -137,6 +125,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 		d.err = errors.New("duplicate label names")
 		return d
 	}
+
 	vh := hashNew()
 	for _, val := range labelValues {
 		vh = hashAdd(vh, val)
@@ -163,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 			Value: proto.String(v),
 		})
 	}
-	sort.Sort(LabelPairSorter(d.constLabelPairs))
+	sort.Sort(labelPairSorter(d.constLabelPairs))
 	return d
 }
 
@@ -193,8 +182,3 @@ func (d *Desc) String() string {
 		d.variableLabels,
 	)
 }
-
-func checkLabelName(l string) bool {
-	return model.LabelName(l).IsValid() &&
-		!strings.HasPrefix(l, reservedLabelPrefix)
-}
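A small, purely illustrative sketch of the behavioral change above (metric name and label value are made up): an empty help string no longer marks the Desc as invalid, while const label values are now validated instead.

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		// Help is empty: with this version the Desc remains usable,
		// although a Help string is still strongly encouraged.
		d := prometheus.NewDesc("demo_build_info", "", nil, prometheus.Labels{"version": "1.2.3"})
		fmt.Println(d.String())
	}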
40  vendor/github.com/prometheus/client_golang/prometheus/doc.go  (generated, vendored)
@@ -11,10 +11,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
 //
 // All exported functions and methods are safe to be used concurrently unless
 // specified otherwise.
@@ -26,6 +28,7 @@
 //	package main
 //
 //	import (
+//		"log"
 //		"net/http"
 //
 //		"github.com/prometheus/client_golang/prometheus"
@@ -59,7 +62,7 @@
 //		// The Handler function provides a default handler to expose metrics
 //		// via an HTTP server. "/metrics" is the usual endpoint for that.
 //		http.Handle("/metrics", promhttp.Handler())
 //		log.Fatal(http.ListenAndServe(":8080", nil))
 //	}
 //
 //
@@ -71,7 +74,10 @@
 // The number of exported identifiers in this package might appear a bit
 // overwhelming. However, in addition to the basic plumbing shown in the example
 // above, you only need to understand the different metric types and their
-// vector versions for basic usage.
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
 //
 // Above, you have already touched the Counter and the Gauge. There are two more
 // advanced metric types: the Summary and Histogram. A more thorough description
@@ -115,7 +121,17 @@
 // NewConstSummary (and their respective Must… versions). That will happen in
 // the Collect method. The Describe method has to return separate Desc
 // instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will marke the Collector “unchecked”. No
+// checks are porformed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situatios where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
 //
 // The Collector example illustrates the use case. You can also look at the
 // source code of the processCollector (mirroring process metrics), the
@@ -144,7 +160,7 @@
 // registry.
 //
 // So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
 // can create a custom registry, or you can even implement the Registerer or
 // Gatherer interfaces yourself. The methods Register and Unregister work in the
 // same way on a custom registry as the global functions Register and Unregister
@@ -152,11 +168,11 @@
 //
 // There are a number of uses for custom registries: You can use registries with
 // special properties, see NewPedanticRegistry. You can avoid global state, as
-// it is imposed by the DefaultRegistry. You can use multiple registries at the
-// same time to expose different metrics in different ways. You can use separate
-// registries for testing purposes.
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
 //
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
 // runtime metrics (via NewGoCollector) and a Collector for process metrics (via
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
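To make the custom-registry guidance above concrete, here is a minimal, illustrative setup of the kind an exporter might use with this library version (endpoint and port are arbitrary, not taken from this commit): a dedicated registry instead of the global DefaultRegisterer, with the Go and process collectors registered explicitly.

	package main

	import (
		"log"
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		r := prometheus.NewRegistry()
		r.MustRegister(
			prometheus.NewGoCollector(),
			prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
		)
		http.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{
			ErrorHandling: promhttp.ContinueOnError,
		}))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}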
13  vendor/github.com/prometheus/client_golang/prometheus/fnv.go  (generated, vendored)
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package prometheus
 
 // Inline and byte-free variant of hash/fnv's fnv64a.
191  vendor/github.com/prometheus/client_golang/prometheus/gauge.go  (generated, vendored)
@@ -13,6 +13,14 @@
 
 package prometheus
 
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
 // Gauge is a Metric that represents a single numerical value that can
 // arbitrarily go up and down.
 //
@@ -48,13 +56,74 @@ type Gauge interface {
 type GaugeOpts Opts
 
 // NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
 func NewGauge(opts GaugeOpts) Gauge {
-	return newValue(NewDesc(
+	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
 		nil,
 		opts.ConstLabels,
-	), GaugeValue, 0)
+	)
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type gauge struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+	return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+	g.Add(1)
+}
+
+func (g *gauge) Dec() {
+	g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+	for {
+		oldBits := atomic.LoadUint64(&g.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (g *gauge) Sub(val float64) {
+	g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+	return populateMetric(GaugeValue, val, g.labelPairs, out)
 }
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -63,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
 // (e.g. number of operations queued, partitioned by user and operation
 // type). Create instances with NewGaugeVec.
 type GaugeVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -77,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			return newValue(desc, GaugeValue, 0, lvs...)
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			}
+			result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
 		return metric.(Gauge), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
 		return metric.(Gauge), err
 	}
@@ -106,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
-	return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
 }
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
-	return m.MetricVec.With(labels).(Gauge)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
 
 // GaugeFunc is a Gauge whose value is determined at collect time by calling a
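A brief, illustrative use of the gauge implementation shown above (metric name is made up): Set is the optimized path, and SetToCurrentTime stores the current Unix time in seconds, exactly as implemented in the diff.

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		lastUpdate := prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "demo_last_update_timestamp_seconds",
			Help: "Unix timestamp of the last update.",
		})
		prometheus.MustRegister(lastUpdate)

		lastUpdate.Set(42)            // fast path: a single atomic store
		lastUpdate.SetToCurrentTime() // time.Now() as seconds since the epoch
		lastUpdate.Dec()              // Inc/Dec/Add/Sub go through a CAS loop
	}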
33  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go  (generated, vendored)
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package prometheus
 
 import (
@@ -11,13 +24,18 @@ type goCollector struct {
 	goroutinesDesc *Desc
 	threadsDesc    *Desc
 	gcDesc         *Desc
+	goInfoDesc     *Desc
+
 	// metrics to describe and collect
 	metrics memStatsMetrics
 }
 
-// NewGoCollector returns a collector which exports metrics about the current
-// go process.
+// NewGoCollector returns a collector which exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This causes a stop-the-world, which is very short with Go1.9+
+// (~25µs). However, with older Go versions, the stop-the-world duration depends
+// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
 func NewGoCollector() Collector {
 	return &goCollector{
 		goroutinesDesc: NewDesc(
@@ -26,12 +44,16 @@ func NewGoCollector() Collector {
 			nil, nil),
 		threadsDesc: NewDesc(
 			"go_threads",
-			"Number of OS threads created",
+			"Number of OS threads created.",
 			nil, nil),
 		gcDesc: NewDesc(
 			"go_gc_duration_seconds",
 			"A summary of the GC invocation durations.",
 			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
 		metrics: memStatsMetrics{
 			{
 				desc: NewDesc(
@@ -239,6 +261,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
 	ch <- c.goroutinesDesc
 	ch <- c.threadsDesc
 	ch <- c.gcDesc
+	ch <- c.goInfoDesc
 	for _, i := range c.metrics {
 		ch <- i.desc
 	}
@@ -259,7 +282,9 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
 	}
 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
-	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
 	ms := &runtime.MemStats{}
 	runtime.ReadMemStats(ms)
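For completeness, an illustrative registration of this collector on a throwaway registry (the gather loop is just for demonstration): with this version it additionally exports a go_info gauge carrying the runtime version.

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		r := prometheus.NewRegistry()
		r.MustRegister(prometheus.NewGoCollector())

		// Gathering triggers runtime.ReadMemStats, i.e. a short stop-the-world.
		mfs, err := r.Gather()
		if err != nil {
			panic(err)
		}
		for _, mf := range mfs {
			fmt.Println(mf.GetName()) // includes go_info, go_goroutines, go_threads, ...
		}
	}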
304  vendor/github.com/prometheus/client_golang/prometheus/histogram.go  (generated, vendored)
@ -16,7 +16,9 @@ package prometheus
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
||||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
// mandatory to set Name to a non-empty string. All other fields are optional
|
||||||
// optional and can safely be left at their zero value.
|
// and can safely be left at their zero value, although it is strongly
|
||||||
|
// encouraged to set a Help string.
|
||||||
type HistogramOpts struct {
|
type HistogramOpts struct {
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
// name of the Histogram (created by joining these components with
|
// name of the Histogram (created by joining these components with
|
||||||
@ -120,29 +123,22 @@ type HistogramOpts struct {
|
|||||||
Subsystem string
|
Subsystem string
|
||||||
Name string
|
Name string
|
||||||
|
|
||||||
// Help provides information about this Histogram. Mandatory!
|
// Help provides information about this Histogram.
|
||||||
//
|
//
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
// Metrics with the same fully-qualified name must have the same Help
|
||||||
// string.
|
// string.
|
||||||
Help string
|
Help string
|
||||||
|
|
||||||
// ConstLabels are used to attach fixed labels to this
|
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||||
// Histogram. Histograms with the same fully-qualified name must have the
|
// with the same fully-qualified name must have the same label names in
|
||||||
// same label names in their ConstLabels.
|
// their ConstLabels.
|
||||||
//
|
//
|
||||||
// Note that in most cases, labels have a value that varies during the
|
// ConstLabels are only used rarely. In particular, do not use them to
|
||||||
// lifetime of a process. Those labels are usually managed with a
|
// attach the same labels to all your metrics. Those use cases are
|
||||||
// HistogramVec. ConstLabels serve only special purposes. One is for the
|
// better covered by target labels set by the scraping Prometheus
|
||||||
// special case where the value of a label does not change during the
|
// server, or by one specific metric (e.g. a build_info or a
|
||||||
// lifetime of a process, e.g. if the revision of the running binary is
|
// machine_role metric). See also
|
||||||
// put into a label. Another, more advanced purpose is if more than one
|
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||||
// Collector needs to collect Histograms with the same fully-qualified
|
|
||||||
// name. In that case, those Summaries must differ in the values of
|
|
||||||
// their ConstLabels. See the Collector examples.
|
|
||||||
//
|
|
||||||
// If the value of a label never changes (not even between binaries),
|
|
||||||
// that label most likely should not be a label at all (but part of the
|
|
||||||
// metric name).
|
|
||||||
ConstLabels Labels
|
ConstLabels Labels
|
||||||
|
|
||||||
// Buckets defines the buckets into which observations are counted. Each
|
// Buckets defines the buckets into which observations are counted. Each
|
||||||
@ -169,7 +165,7 @@ func NewHistogram(opts HistogramOpts) Histogram {
|
|||||||
|
|
||||||
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
if len(desc.variableLabels) != len(labelValues) {
|
||||||
panic(errInconsistentCardinality)
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
for _, n := range desc.variableLabels {
|
||||||
@ -191,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||||||
desc: desc,
|
desc: desc,
|
||||||
upperBounds: opts.Buckets,
|
upperBounds: opts.Buckets,
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
labelPairs: makeLabelPairs(desc, labelValues),
|
||||||
|
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
|
||||||
}
|
}
|
||||||
for i, upperBound := range h.upperBounds {
|
for i, upperBound := range h.upperBounds {
|
||||||
if i < len(h.upperBounds)-1 {
|
if i < len(h.upperBounds)-1 {
|
||||||
@ -207,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 			}
 		}
 	}
-	// Finally we know the final length of h.upperBounds and can make counts.
-	h.counts = make([]uint64, len(h.upperBounds))
+	// Finally we know the final length of h.upperBounds and can make counts
+	// for both states:
+	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
 
 	h.init(h) // Init self-collection.
 	return h
 }
 
-type histogram struct {
+type histogramCounts struct {
 	// sumBits contains the bits of the float64 representing the sum of all
 	// observations. sumBits and count have to go first in the struct to
 	// guarantee alignment for atomic operations.
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	sumBits uint64
 	count   uint64
+	buckets []uint64
+}
+
+type histogram struct {
+	// countAndHotIdx is a complicated one. For lock-free yet atomic
+	// observations, we need to save the total count of observations again,
+	// combined with the index of the currently-hot counts struct, so that
+	// we can perform the operation on both values atomically. The least
+	// significant bit defines the hot counts struct. The remaining 63 bits
+	// represent the total count of observations. This happens under the
+	// assumption that the 63bit count will never overflow. Rationale: An
+	// observations takes about 30ns. Let's assume it could happen in
+	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+	// which is about 3000 years.
+	//
+	// This has to be first in the struct for 64bit alignment. See
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
 
 	selfCollector
-	// Note that there is no mutex required.
-
-	desc *Desc
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
 
 	upperBounds []float64
-	counts      []uint64
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*histogramCounts
+	hotIdx int // Index of currently-hot counts. Only used within Write.
 
 	labelPairs []*dto.LabelPair
 }
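Editor's note: the countAndHotIdx comment above packs two values into one word. A tiny standalone sketch (not part of this diff, purely illustrative) of how the packing behaves — the low bit selects the hot counts struct, the upper 63 bits carry the observation count:

package main

import "fmt"

func main() {
    var countAndHotIdx uint64 // count 0, hot index 0

    countAndHotIdx += 2 // one observation: count becomes 1, hot index untouched
    countAndHotIdx += 2 // count becomes 2
    fmt.Println(countAndHotIdx>>1, countAndHotIdx&1) // 2 0

    countAndHotIdx += 1 // a Write flips the hot index from 0 to 1
    fmt.Println(countAndHotIdx>>1, countAndHotIdx&1) // 2 1
}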
@ -248,36 +270,113 @@ func (h *histogram) Observe(v float64) {
 	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
 	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
 	i := sort.SearchFloat64s(h.upperBounds, v)
-	if i < len(h.counts) {
-		atomic.AddUint64(&h.counts[i], 1)
+
+	// We increment h.countAndHotIdx by 2 so that the counter in the upper
+	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 2)
+	hotCounts := h.counts[n%2]
+
+	if i < len(h.upperBounds) {
+		atomic.AddUint64(&hotCounts.buckets[i], 1)
 	}
-	atomic.AddUint64(&h.count, 1)
 	for {
-		oldBits := atomic.LoadUint64(&h.sumBits)
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
 		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
 			break
 		}
 	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
 }
 
 func (h *histogram) Write(out *dto.Metric) error {
-	his := &dto.Histogram{}
-	buckets := make([]*dto.Bucket, len(h.upperBounds))
+	var (
+		his                   = &dto.Histogram{}
+		buckets               = make([]*dto.Bucket, len(h.upperBounds))
+		hotCounts, coldCounts *histogramCounts
+		count                 uint64
+	)
 
-	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
-	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
-	var count uint64
+	// For simplicity, we mutex the rest of this method. It is not in the
+	// hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it.
+	h.writeMtx.Lock()
+	defer h.writeMtx.Unlock()
+
+	// This is a bit arcane, which is why the following spells out this if
+	// clause in English:
+	//
+	// If the currently-hot counts struct is #0, we atomically increment
+	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
+	// struct #1. Furthermore, the atomic increment gives us the new value,
+	// which, in its most significant 63 bits, tells us the count of
+	// observations done so far up to and including currently ongoing
+	// observations still using the counts struct just changed from hot to
+	// cold. To have a normal uint64 for the count, we bitshift by 1 and
+	// save the result in count. We also set h.hotIdx to 1 for the next
+	// Write call, and we will refer to counts #1 as hotCounts and to counts
+	// #0 as coldCounts.
+	//
+	// If the currently-hot counts struct is #1, we do the corresponding
+	// things the other way round. We have to _decrement_ h.countAndHotIdx
+	// (which is a bit arcane in itself, as we have to express -1 with an
+	// unsigned int...).
+	if h.hotIdx == 0 {
+		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
+		h.hotIdx = 1
+		hotCounts = h.counts[1]
+		coldCounts = h.counts[0]
+	} else {
+		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
+		h.hotIdx = 0
+		hotCounts = h.counts[0]
+		coldCounts = h.counts[1]
+	}
+
+	// Now we have to wait for the now-declared-cold counts to actually cool
+	// down, i.e. wait for all observations still using it to finish. That's
+	// the case once the count in the cold counts struct is the same as the
+	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
+	for {
+		if count == atomic.LoadUint64(&coldCounts.count) {
+			break
+		}
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	his.SampleCount = proto.Uint64(count)
+	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
-		count += atomic.LoadUint64(&h.counts[i])
+		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
 		buckets[i] = &dto.Bucket{
-			CumulativeCount: proto.Uint64(count),
+			CumulativeCount: proto.Uint64(cumCount),
 			UpperBound:      proto.Float64(upperBound),
 		}
 	}
 
 	his.Bucket = buckets
 	out.Histogram = his
 	out.Label = h.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	for i := range h.upperBounds {
+		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	}
 	return nil
 }
 
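Editor's note: for orientation, a minimal caller-side sketch of how the Observe/Write split above is exercised. The metric name, buckets, and doWork are illustrative assumptions, not part of this change; Observe runs on the lock-free hot path, and Write only runs when the registry gathers the metric.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func doWork() { time.Sleep(5 * time.Millisecond) } // hypothetical workload

func main() {
    reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "example_request_duration_seconds",
        Help:    "Example request duration.",
        Buckets: prometheus.DefBuckets,
    })
    prometheus.MustRegister(reqDur)

    start := time.Now()
    doWork()
    reqDur.Observe(time.Since(start).Seconds()) // lock-free Observe path shown above
    // A scrape eventually calls Write, which swaps the hot and cold counts.
}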
@ -287,12 +386,11 @@ func (h *histogram) Write(out *dto.Metric) error {
 // (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewHistogramVec.
 type HistogramVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -301,47 +399,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
 			return newHistogram(desc, opts, lvs...)
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
-		return metric.(Histogram), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
-		return metric.(Histogram), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
-	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
 }
 
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
-	return m.MetricVec.With(labels).(Histogram)
+// With works as GetMetricWith but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
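Editor's note: the currying API added above pairs naturally with per-handler instrumentation. A minimal hedged sketch (metric name, buckets, and label values are illustrative assumptions, not part of this diff):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    latency := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "example_request_duration_seconds",
            Help:    "Request latency by handler and method.",
            Buckets: prometheus.DefBuckets,
        },
        []string{"handler", "method"},
    )
    prometheus.MustRegister(latency) // register the uncurried vector

    // Pre-set the "handler" label; the curried ObserverVec only needs "method".
    apiLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/api"})
    apiLatency.WithLabelValues("GET").Observe(0.042)
}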
 
 type constHistogram struct {
@ -393,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
 // bucket.
 //
 // NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
 func NewConstHistogram(
 	desc *Desc,
 	count uint64,
@ -401,8 +568,11 @@ func NewConstHistogram(
 	buckets map[float64]uint64,
 	labelValues ...string,
 ) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
-		return nil, errInconsistentCardinality
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constHistogram{
 		desc: desc,
164 vendor/github.com/prometheus/client_golang/prometheus/http.go (generated, vendored)
@ -15,9 +15,7 @@ package prometheus
 
 import (
 	"bufio"
-	"bytes"
 	"compress/gzip"
-	"fmt"
 	"io"
 	"net"
 	"net/http"
@ -41,19 +39,10 @@ const (
 	acceptEncodingHeader = "Accept-Encoding"
 )
 
-var bufPool sync.Pool
-
-func getBuf() *bytes.Buffer {
-	buf := bufPool.Get()
-	if buf == nil {
-		return &bytes.Buffer{}
-	}
-	return buf.(*bytes.Buffer)
-}
-
-func giveBuf(buf *bytes.Buffer) {
-	buf.Reset()
-	bufPool.Put(buf)
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
 }
 
 // Handler returns an HTTP handler for the DefaultGatherer. It is
@ -61,68 +50,50 @@ func giveBuf(buf *bytes.Buffer) {
 // name).
 //
 // Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is not instrumented).
+// InstrumentHandler. You might want to consider using promhttp.Handler instead.
 func Handler() http.Handler {
 	return InstrumentHandler("prometheus", UninstrumentedHandler())
 }
 
 // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
 //
-// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
+// instead. See there for further documentation.
 func UninstrumentedHandler() http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+	return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		mfs, err := DefaultGatherer.Gather()
 		if err != nil {
-			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+			httpError(rsp, err)
 			return
 		}
 
 		contentType := expfmt.Negotiate(req.Header)
-		buf := getBuf()
-		defer giveBuf(buf)
-		writer, encoding := decorateWriter(req, buf)
-		enc := expfmt.NewEncoder(writer, contentType)
-		var lastErr error
+		header := rsp.Header()
+		header.Set(contentTypeHeader, string(contentType))
+
+		w := io.Writer(rsp)
+		if gzipAccepted(req.Header) {
+			header.Set(contentEncodingHeader, "gzip")
+			gz := gzipPool.Get().(*gzip.Writer)
+			defer gzipPool.Put(gz)
+
+			gz.Reset(w)
+			defer gz.Close()
+
+			w = gz
+		}
+
+		enc := expfmt.NewEncoder(w, contentType)
+
 		for _, mf := range mfs {
 			if err := enc.Encode(mf); err != nil {
-				lastErr = err
-				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+				httpError(rsp, err)
 				return
 			}
 		}
-		if closer, ok := writer.(io.Closer); ok {
-			closer.Close()
-		}
-		if lastErr != nil && buf.Len() == 0 {
-			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		header := w.Header()
-		header.Set(contentTypeHeader, string(contentType))
-		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
-		if encoding != "" {
-			header.Set(contentEncodingHeader, encoding)
-		}
-		w.Write(buf.Bytes())
 	})
 }
 
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
-	header := request.Header.Get(acceptEncodingHeader)
-	parts := strings.Split(header, ",")
-	for _, part := range parts {
-		part := strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return gzip.NewWriter(writer), "gzip"
-		}
-	}
-	return writer, ""
-}
-
 var instLabels = []string{"method", "code"}
 
 type nower interface {
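Editor's note: the deprecation comments above point at promhttp. A minimal sketch of the non-deprecated way to expose metrics, using the DefaultGatherer named in the comment (port and route are illustrative assumptions):

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // HandlerFor serves whatever the gatherer collects; HandlerOpts{} keeps defaults.
    http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
    http.ListenAndServe(":8080", nil)
}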
@ -139,16 +110,6 @@ var now nower = nowFunc(func() time.Time {
 	return time.Now()
 })
 
-func nowSeries(t ...time.Time) nower {
-	return nowFunc(func() time.Time {
-		defer func() {
-			t = t[1:]
-		}()
-
-		return t[0]
-	})
-}
-
 // InstrumentHandler wraps the given HTTP handler for instrumentation. It
 // registers four metric collectors (if not already done) and reports HTTP
 // metrics to the (newly or already) registered collectors: http_requests_total
@ -158,26 +119,16 @@ func nowSeries(t ...time.Time) nower {
 // value. http_requests_total is a metric vector partitioned by HTTP method
 // (label name "method") and HTTP status code (label name "code").
 //
-// Deprecated: InstrumentHandler has several issues:
-//
-// - It uses Summaries rather than Histograms. Summaries are not useful if
-// aggregation across multiple instances is required.
-//
-// - It uses microseconds as unit, which is deprecated and should be replaced by
-// seconds.
-//
-// - The size of the request is calculated in a separate goroutine. Since this
-// calculator requires access to the request header, it creates a race with
-// any writes to the header performed during request handling.
-// httputil.ReverseProxy is a prominent example for a handler
-// performing such writes.
-//
-// - It has additional issues with HTTP/2, cf.
-// https://github.com/prometheus/client_golang/issues/272.
-//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Please prefer direct
-// instrumentation in the meantime.
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following: (1) It uses Summaries
+// rather than Histograms. Summaries are not useful if aggregation across
+// multiple instances is required. (2) It uses microseconds as unit, which is
+// deprecated and should be replaced by seconds. (3) The size of the request is
+// calculated in a separate goroutine. Since this calculator requires access to
+// the request header, it creates a race with any writes to the header performed
+// during request handling. httputil.ReverseProxy is a prominent example for a
+// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
 func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
 }
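Editor's note: a hedged sketch of "the tooling provided in package promhttp" that the deprecation notes recommend. It assumes the middleware helpers (InstrumentHandlerCounter, Handler) shipped alongside this vendored promhttp version; myHandler, the metric name, and the port are hypothetical.

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func myHandler(w http.ResponseWriter, r *http.Request) { // hypothetical handler
    w.Write([]byte("ok"))
}

func main() {
    reqs := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "example_http_requests_total",
            Help: "Requests by status code and method.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(reqs)

    // Counter-based middleware instead of the deprecated InstrumentHandler.
    http.Handle("/", promhttp.InstrumentHandlerCounter(reqs, http.HandlerFunc(myHandler)))
    http.Handle("/metrics", promhttp.Handler())
    http.ListenAndServe(":8080", nil)
}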
@ -187,7 +138,7 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
 // issues).
 //
 // Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(
 		SummaryOpts{
@ -226,7 +177,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
 // SummaryOpts.
 //
 // Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
 }
@ -237,7 +188,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
 // SummaryOpts are used.
 //
 // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is.
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	reqCnt := NewCounterVec(
 		CounterOpts{
@ -320,7 +271,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
 }
 
 func computeApproximateRequestSize(r *http.Request) <-chan int {
-	// Get URL length in current go routine for avoiding a race condition.
+	// Get URL length in current goroutine for avoiding a race condition.
 	// HandlerFunc that runs in parallel may modify the URL.
 	s := 0
 	if r.URL != nil {
@ -355,10 +306,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int {
 type responseWriterDelegator struct {
 	http.ResponseWriter
 
-	handler, method string
-	status          int
-	written         int64
-	wroteHeader     bool
+	status      int
+	written     int64
+	wroteHeader bool
 }
 
 func (r *responseWriterDelegator) WriteHeader(code int) {
@ -524,3 +474,31 @@ func sanitizeCode(s int) string {
 		return strconv.Itoa(s)
 	}
 }
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerErrer. Error contents is
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
85 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go (new file, generated, vendored)
@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sort"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+	return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent. However, we have to deal with the fact, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
87 vendor/github.com/prometheus/client_golang/prometheus/labels.go (new file, generated, vendored)
@ -0,0 +1,87 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+	return fmt.Errorf(
+		"%s: %q has %d variable labels named %q but %d values %q were provided",
+		errInconsistentCardinality, fqName,
+		len(labels), labels,
+		len(labelValues), labelValues,
+	)
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+	if len(labels) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%s: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(labels), labels,
+		)
+	}
+
+	for name, val := range labels {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+		}
+	}
+
+	return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+	if len(vals) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%s: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(vals), vals,
+		)
+	}
+
+	for _, val := range vals {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label value %q is not valid UTF-8", val)
+		}
+	}
+
+	return nil
+}
+
+func checkLabelName(l string) bool {
+	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
90 vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored)
@ -15,6 +15,9 @@ package prometheus
 
 import (
 	"strings"
+	"time"
 
+	"github.com/golang/protobuf/proto"
+
 	dto "github.com/prometheus/client_model/go"
 )
@ -43,9 +46,8 @@ type Metric interface {
 	// While populating dto.Metric, it is the responsibility of the
 	// implementation to ensure validity of the Metric protobuf (like valid
 	// UTF-8 strings or syntactically valid metric and label names). It is
-	// recommended to sort labels lexicographically. (Implementers may find
-	// LabelPairSorter useful for that.) Callers of Write should still make
-	// sure of sorting if they depend on it.
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still make sure of sorting if they depend on it.
 	Write(*dto.Metric) error
 	// TODO(beorn7): The original rationale of passing in a pre-allocated
 	// dto.Metric protobuf to save allocations has disappeared. The
@ -57,8 +59,9 @@ type Metric interface {
 // implementation XXX has its own XXXOpts type, but in most cases, it is just be
 // an alias of this type (which might change when the requirement arises.)
 //
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
 type Opts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Metric (created by joining these components with
@ -69,7 +72,7 @@ type Opts struct {
 	Subsystem string
 	Name      string
 
-	// Help provides information about this metric. Mandatory!
+	// Help provides information about this metric.
 	//
 	// Metrics with the same fully-qualified name must have the same Help
 	// string.
@ -79,20 +82,12 @@ type Opts struct {
 	// with the same fully-qualified name must have the same label names in
 	// their ConstLabels.
 	//
-	// Note that in most cases, labels have a value that varies during the
-	// lifetime of a process. Those labels are usually managed with a metric
-	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
-	// serve only special purposes. One is for the special case where the
-	// value of a label does not change during the lifetime of a process,
-	// e.g. if the revision of the running binary is put into a
-	// label. Another, more advanced purpose is if more than one Collector
-	// needs to collect Metrics with the same fully-qualified name. In that
-	// case, those Metrics must differ in the values of their
-	// ConstLabels. See the Collector examples.
-	//
-	// If the value of a label never changes (not even between binaries),
-	// that label most likely should not be a label at all (but part of the
-	// metric name).
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
 	ConstLabels Labels
 }
 
@ -118,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
 	return name
 }
 
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
 
-func (s LabelPairSorter) Len() int {
+func (s labelPairSorter) Len() int {
 	return len(s)
 }
 
-func (s LabelPairSorter) Swap(i, j int) {
+func (s labelPairSorter) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
 
-func (s LabelPairSorter) Less(i, j int) bool {
+func (s labelPairSorter) Less(i, j int) bool {
 	return s[i].GetName() < s[j].GetName()
 }
 
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
-	return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
-	return s[i] < s[j]
-}
-
 type invalidMetric struct {
 	desc *Desc
 	err  error
@ -164,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
 func (m *invalidMetric) Desc() *Desc { return m.desc }
 
 func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+	Metric
+	t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+	e := m.Metric.Write(pb)
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+	return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+	return timestampedMetric{Metric: m, t: t}
+}
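Editor's note: NewMetricWithTimestamp is the API this vendor bump pulls in for the PR's timestamped samples. A minimal hedged sketch of the intended pattern (descriptor name, value, and timestamp below are illustrative assumptions): build a const metric, then wrap it with the timestamp reported by the upstream source before sending it on a Collect channel.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// exampleDesc is illustrative, not part of this diff.
var exampleDesc = prometheus.NewDesc(
    "example_mirrored_value",
    "A value mirrored from another system, exported with its original timestamp.",
    nil, nil,
)

// emit wraps a const metric with the source-provided sample time.
func emit(ch chan<- prometheus.Metric, value float64, sampledAt time.Time) {
    m := prometheus.MustNewConstMetric(exampleDesc, prometheus.GaugeValue, value)
    ch <- prometheus.NewMetricWithTimestamp(sampledAt, m)
}

func main() {
    ch := make(chan prometheus.Metric, 1)
    emit(ch, 42, time.Unix(1543160298, 0))
}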
52 vendor/github.com/prometheus/client_golang/prometheus/observer.go (new file, generated, vendored)
@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+	Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+	f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+	GetMetricWith(Labels) (Observer, error)
+	GetMetricWithLabelValues(lvs ...string) (Observer, error)
+	With(Labels) Observer
+	WithLabelValues(...string) Observer
+	CurryWith(Labels) (ObserverVec, error)
+	MustCurryWith(Labels) ObserverVec
+
+	Collector
+}
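Editor's note: the "Gauge" Timer use case mentioned in the ObserverFunc comment looks roughly like this. The metric name is an assumption; prometheus.NewTimer is the timer helper shipped alongside this interface in client_golang.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_last_run_duration_seconds",
        Help: "Duration of the last run.",
    })
    prometheus.MustRegister(lastRun)

    // Gauge.Set matches the ObserverFunc signature, so the timer writes the
    // elapsed time straight into the gauge.
    timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
    time.Sleep(10 * time.Millisecond) // the work being timed
    timer.ObserveDuration()
}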
122 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
@ -13,46 +13,74 @@
 
 package prometheus
 
-import "github.com/prometheus/procfs"
+import (
+	"errors"
+	"os"
+
+	"github.com/prometheus/procfs"
+)
 
 type processCollector struct {
-	pid             int
 	collectFn       func(chan<- Metric)
 	pidFn           func() (int, error)
+	reportErrors    bool
 	cpuTotal        *Desc
 	openFDs, maxFDs *Desc
-	vsize, rss      *Desc
+	vsize, maxVsize *Desc
+	rss             *Desc
 	startTime       *Desc
 }
 
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) Collector {
-	return NewProcessCollectorPIDFn(
-		func() (int, error) { return pid, nil },
-		namespace,
-	)
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined on construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
 }
 
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
-func NewProcessCollectorPIDFn(
-	pidFn func() (int, error),
-	namespace string,
-) Collector {
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// Currently, the collector depends on a Linux-style proc filesystem and
+// therefore only exports metrics for Linux.
+//
+// Note: An older version of this function had the following signature:
+//
+//     NewProcessCollector(pid int, namespace string) Collector
+//
+// Most commonly, it was called as
+//
+//     NewProcessCollector(os.Getpid(), "")
+//
+// The following call of the current version is equivalent to the above:
+//
+//     NewProcessCollector(ProcessCollectorOpts{})
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	ns := ""
-	if len(namespace) > 0 {
-		ns = namespace + "_"
+	if len(opts.Namespace) > 0 {
+		ns = opts.Namespace + "_"
 	}
 
-	c := processCollector{
-		pidFn:     pidFn,
-		collectFn: func(chan<- Metric) {},
+	c := &processCollector{
+		reportErrors: opts.ReportErrors,
 
 		cpuTotal: NewDesc(
 			ns+"process_cpu_seconds_total",
 			"Total user and system CPU time spent in seconds.",
@ -73,6 +101,11 @@ func NewProcessCollectorPIDFn(
 			"Virtual memory size in bytes.",
 			nil, nil,
 		),
+		maxVsize: NewDesc(
+			ns+"process_virtual_memory_max_bytes",
+			"Maximum amount of virtual memory available in bytes.",
+			nil, nil,
+		),
 		rss: NewDesc(
 			ns+"process_resident_memory_bytes",
 			"Resident memory size in bytes.",
@ -85,12 +118,23 @@ func NewProcessCollectorPIDFn(
 		),
 	}
 
+	if opts.PidFn == nil {
+		pid := os.Getpid()
+		c.pidFn = func() (int, error) { return pid, nil }
+	} else {
+		c.pidFn = opts.PidFn
+	}
+
 	// Set up process metric collection if supported by the runtime.
 	if _, err := procfs.NewStat(); err == nil {
 		c.collectFn = c.processCollect
+	} else {
+		c.collectFn = func(ch chan<- Metric) {
+			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+		}
 	}
 
-	return &c
+	return c
 }
 
 // Describe returns all descriptions of the collector.
@ -99,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
 	ch <- c.openFDs
 	ch <- c.maxFDs
 	ch <- c.vsize
+	ch <- c.maxVsize
 	ch <- c.rss
 	ch <- c.startTime
 }
@ -108,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }
 
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
 func (c *processCollector) processCollect(ch chan<- Metric) {
 	pid, err := c.pidFn()
 	if err != nil {
+		c.reportError(ch, nil, err)
 		return
 	}
 
 	p, err := procfs.NewProc(pid)
 	if err != nil {
+		c.reportError(ch, nil, err)
 		return
 	}
 
@ -127,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
|||||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
||||||
if startTime, err := stat.StartTime(); err == nil {
|
if startTime, err := stat.StartTime(); err == nil {
|
||||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, c.startTime, err)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, c.openFDs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if limits, err := p.NewLimits(); err == nil {
|
if limits, err := p.NewLimits(); err == nil {
|
||||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
||||||
|
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
|
||||||
|
} else {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||||
|
if !c.reportErrors {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
desc = NewInvalidDesc(err)
|
||||||
|
}
|
||||||
|
ch <- NewInvalidMetric(desc, err)
|
||||||
|
}
|
||||||
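A minimal usage sketch (not part of the vendored diff): how a caller can register the reworked process collector with an explicit PID function and error reporting, assuming client_golang v0.9.x. The port and registry setup are illustrative.

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		// PidFn defaults to the current process when left nil; set explicitly here for clarity.
		PidFn:        func() (int, error) { return os.Getpid(), nil },
		ReportErrors: true, // surface /proc read failures as invalid metrics
	}))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}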
199 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go (generated, vendored, new file)
@@ -0,0 +1,199 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
closeNotifier = 1 << iota
|
||||||
|
flusher
|
||||||
|
hijacker
|
||||||
|
readerFrom
|
||||||
|
pusher
|
||||||
|
)
|
||||||
|
|
||||||
|
type delegator interface {
|
||||||
|
http.ResponseWriter
|
||||||
|
|
||||||
|
Status() int
|
||||||
|
Written() int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type responseWriterDelegator struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
|
||||||
|
handler, method string
|
||||||
|
status int
|
||||||
|
written int64
|
||||||
|
wroteHeader bool
|
||||||
|
observeWriteHeader func(int)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Status() int {
|
||||||
|
return r.status
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Written() int64 {
|
||||||
|
return r.written
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||||
|
r.status = code
|
||||||
|
r.wroteHeader = true
|
||||||
|
r.ResponseWriter.WriteHeader(code)
|
||||||
|
if r.observeWriteHeader != nil {
|
||||||
|
r.observeWriteHeader(code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||||
|
if !r.wroteHeader {
|
||||||
|
r.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
n, err := r.ResponseWriter.Write(b)
|
||||||
|
r.written += int64(n)
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type closeNotifierDelegator struct{ *responseWriterDelegator }
|
||||||
|
type flusherDelegator struct{ *responseWriterDelegator }
|
||||||
|
type hijackerDelegator struct{ *responseWriterDelegator }
|
||||||
|
type readerFromDelegator struct{ *responseWriterDelegator }
|
||||||
|
|
||||||
|
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
|
||||||
|
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||||
|
}
|
||||||
|
func (d flusherDelegator) Flush() {
|
||||||
|
d.ResponseWriter.(http.Flusher).Flush()
|
||||||
|
}
|
||||||
|
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||||
|
return d.ResponseWriter.(http.Hijacker).Hijack()
|
||||||
|
}
|
||||||
|
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||||
|
if !d.wroteHeader {
|
||||||
|
d.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
|
||||||
|
d.written += n
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// TODO(beorn7): Code generation would help here.
|
||||||
|
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
|
||||||
|
return closeNotifierDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
|
||||||
|
return flusherDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
|
||||||
|
return hijackerDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
|
||||||
|
return readerFromDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
}{d, readerFromDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
}
|
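A standalone sketch (not part of the vendored code) of the capability-bitmask idea that newDelegator uses above: optional http.ResponseWriter interfaces are detected via type assertions and combined into an index. Names here are illustrative.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const (
	hasFlusher = 1 << iota
	hasHijacker
)

// capabilities builds a small bitmask of the optional interfaces w implements.
func capabilities(w http.ResponseWriter) int {
	id := 0
	if _, ok := w.(http.Flusher); ok {
		id += hasFlusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hasHijacker
	}
	return id
}

func main() {
	rec := httptest.NewRecorder() // implements Flusher, not Hijacker
	fmt.Println(capabilities(rec) == hasFlusher)
}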
181 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go (generated, vendored, new file)
@@ -0,0 +1,181 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build go1.8
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
type pusherDelegator struct{ *responseWriterDelegator }
|
||||||
|
|
||||||
|
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
||||||
|
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
||||||
|
return pusherDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||||
|
d := &responseWriterDelegator{
|
||||||
|
ResponseWriter: w,
|
||||||
|
observeWriteHeader: observeWriteHeaderFunc,
|
||||||
|
}
|
||||||
|
|
||||||
|
id := 0
|
||||||
|
if _, ok := w.(http.CloseNotifier); ok {
|
||||||
|
id += closeNotifier
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Flusher); ok {
|
||||||
|
id += flusher
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Hijacker); ok {
|
||||||
|
id += hijacker
|
||||||
|
}
|
||||||
|
if _, ok := w.(io.ReaderFrom); ok {
|
||||||
|
id += readerFrom
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Pusher); ok {
|
||||||
|
id += pusher
|
||||||
|
}
|
||||||
|
|
||||||
|
return pickDelegator[id](d)
|
||||||
|
}
|
44 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build !go1.8
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||||
|
d := &responseWriterDelegator{
|
||||||
|
ResponseWriter: w,
|
||||||
|
observeWriteHeader: observeWriteHeaderFunc,
|
||||||
|
}
|
||||||
|
|
||||||
|
id := 0
|
||||||
|
if _, ok := w.(http.CloseNotifier); ok {
|
||||||
|
id += closeNotifier
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Flusher); ok {
|
||||||
|
id += flusher
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Hijacker); ok {
|
||||||
|
id += hijacker
|
||||||
|
}
|
||||||
|
if _, ok := w.(io.ReaderFrom); ok {
|
||||||
|
id += readerFrom
|
||||||
|
}
|
||||||
|
|
||||||
|
return pickDelegator[id](d)
|
||||||
|
}
|
252 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored)
@@ -11,31 +11,34 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
 //
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
 //
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
 package promhttp
 
 import (
-	"bytes"
 	"compress/gzip"
 	"fmt"
 	"io"
 	"net/http"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/prometheus/common/expfmt"
 
@@ -49,36 +52,56 @@ const (
 	acceptEncodingHeader = "Accept-Encoding"
 )
 
-var bufPool sync.Pool
-
-func getBuf() *bytes.Buffer {
-	buf := bufPool.Get()
-	if buf == nil {
-		return &bytes.Buffer{}
-	}
-	return buf.(*bytes.Buffer)
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
 }
 
-func giveBuf(buf *bytes.Buffer) {
-	buf.Reset()
-	bufPool.Put(buf)
-}
-
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
 //
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
 func Handler() http.Handler {
-	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
 }
 
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+	var inFlightSem chan struct{}
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+
+	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(rsp, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
 		mfs, err := reg.Gather()
 		if err != nil {
 			if opts.ErrorLog != nil {
@@ -89,26 +112,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				panic(err)
 			case ContinueOnError:
 				if len(mfs) == 0 {
-					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					// Still report the error if no metrics have been gathered.
+					httpError(rsp, err)
 					return
 				}
 			case HTTPErrorOnError:
-				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				httpError(rsp, err)
 				return
 			}
 		}
 
 		contentType := expfmt.Negotiate(req.Header)
-		buf := getBuf()
-		defer giveBuf(buf)
-		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
-		enc := expfmt.NewEncoder(writer, contentType)
+		header := rsp.Header()
+		header.Set(contentTypeHeader, string(contentType))
+
+		w := io.Writer(rsp)
+		if !opts.DisableCompression && gzipAccepted(req.Header) {
+			header.Set(contentEncodingHeader, "gzip")
+			gz := gzipPool.Get().(*gzip.Writer)
+			defer gzipPool.Put(gz)
+
+			gz.Reset(w)
+			defer gz.Close()
+
+			w = gz
+		}
+
+		enc := expfmt.NewEncoder(w, contentType)
+
 		var lastErr error
 		for _, mf := range mfs {
 			if err := enc.Encode(mf); err != nil {
 				lastErr = err
 				if opts.ErrorLog != nil {
-					opts.ErrorLog.Println("error encoding metric family:", err)
+					opts.ErrorLog.Println("error encoding and sending metric family:", err)
 				}
 				switch opts.ErrorHandling {
 				case PanicOnError:
@@ -116,27 +153,75 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				case ContinueOnError:
 					// Handled later.
 				case HTTPErrorOnError:
-					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+					httpError(rsp, err)
 					return
 				}
 			}
 		}
-		if closer, ok := writer.(io.Closer); ok {
-			closer.Close()
+
+		if lastErr != nil {
+			httpError(rsp, lastErr)
 		}
-		if lastErr != nil && buf.Len() == 0 {
-			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		header := w.Header()
-		header.Set(contentTypeHeader, string(contentType))
-		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
-		if encoding != "" {
-			header.Set(contentEncodingHeader, encoding)
-		}
-		w.Write(buf.Bytes())
-		// TODO(beorn7): Consider streaming serving of metrics.
 	})
+
+	if opts.Timeout <= 0 {
+		return h
+	}
+	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+		"Exceeded configured timeout of %v.\n",
+		opts.Timeout,
+	))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+	cnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "promhttp_metric_handler_requests_total",
+			Help: "Total number of scrapes by HTTP status code.",
+		},
+		[]string{"code"},
+	)
+	// Initialize the most likely HTTP status codes.
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+	cnt.WithLabelValues("503")
+	if err := reg.Register(cnt); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "promhttp_metric_handler_requests_in_flight",
+		Help: "Current number of scrapes being served.",
+	})
+	if err := reg.Register(gge); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			gge = are.ExistingCollector.(prometheus.Gauge)
+		} else {
+			panic(err)
+		}
+	}
+
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
 }
 
 // HandlerErrorHandling defines how a Handler serving metrics will handle
@@ -180,22 +265,47 @@ type HandlerOpts struct {
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 ServiceUnavailable and a suitable Message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP requests as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
 }
 
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
-	if compressionDisabled {
-		return writer, ""
-	}
-	header := request.Header.Get(acceptEncodingHeader)
-	parts := strings.Split(header, ",")
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
 	for _, part := range parts {
-		part := strings.TrimSpace(part)
+		part = strings.TrimSpace(part)
 		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return gzip.NewWriter(writer), "gzip"
+			return true
 		}
 	}
-	return writer, ""
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerErrer. Error contents is
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
97 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored, new file)
@@ -0,0 +1,97 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The RoundTripperFunc type is an adapter to allow the use of ordinary
|
||||||
|
// functions as RoundTrippers. If f is a function with the appropriate
|
||||||
|
// signature, RountTripperFunc(f) is a RoundTripper that calls f.
|
||||||
|
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
// RoundTrip implements the RoundTripper interface.
|
||||||
|
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
|
return rt(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
|
||||||
|
// requests currently handled by the wrapped http.RoundTripper.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||||
|
gauge.Inc()
|
||||||
|
defer gauge.Dec()
|
||||||
|
return next.RoundTrip(r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperCounter is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper to observe the request result with the provided CounterVec.
|
||||||
|
// The CounterVec must have zero, one, or two non-const non-curried labels. For
|
||||||
|
// those, the only allowed label names are "code" and "method". The function
|
||||||
|
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
|
||||||
|
// and/or HTTP method if the respective instance label names are present in the
|
||||||
|
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
|
||||||
|
//
|
||||||
|
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
|
||||||
|
// is not incremented.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
code, method := checkLabels(counter)
|
||||||
|
|
||||||
|
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||||
|
resp, err := next.RoundTrip(r)
|
||||||
|
if err == nil {
|
||||||
|
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperDuration is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper to observe the request duration with the provided
|
||||||
|
// ObserverVec. The ObserverVec must have zero, one, or two non-const
|
||||||
|
// non-curried labels. For those, the only allowed label names are "code" and
|
||||||
|
// "method". The function panics otherwise. The Observe method of the Observer
|
||||||
|
// in the ObserverVec is called with the request duration in
|
||||||
|
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||||
|
// respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
|
||||||
|
// reported.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
code, method := checkLabels(obs)
|
||||||
|
|
||||||
|
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := next.RoundTrip(r)
|
||||||
|
if err == nil {
|
||||||
|
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
})
|
||||||
|
}
|
144 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go (generated, vendored, new file)
@@ -0,0 +1,144 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build go1.8
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
||||||
|
// httptrace.ClientTrace hook functions. Each function is passed a float64
|
||||||
|
// representing the time in seconds since the start of the http request. A user
|
||||||
|
// may choose to use separately buckets Histograms, or implement custom
|
||||||
|
// instance labels on a per function basis.
|
||||||
|
type InstrumentTrace struct {
|
||||||
|
GotConn func(float64)
|
||||||
|
PutIdleConn func(float64)
|
||||||
|
GotFirstResponseByte func(float64)
|
||||||
|
Got100Continue func(float64)
|
||||||
|
DNSStart func(float64)
|
||||||
|
DNSDone func(float64)
|
||||||
|
ConnectStart func(float64)
|
||||||
|
ConnectDone func(float64)
|
||||||
|
TLSHandshakeStart func(float64)
|
||||||
|
TLSHandshakeDone func(float64)
|
||||||
|
WroteHeaders func(float64)
|
||||||
|
Wait100Continue func(float64)
|
||||||
|
WroteRequest func(float64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperTrace is a middleware that wraps the provided
|
||||||
|
// RoundTripper and reports times to hook functions provided in the
|
||||||
|
// InstrumentTrace struct. Hook functions that are not present in the provided
|
||||||
|
// InstrumentTrace struct are ignored. Times reported to the hook functions are
|
||||||
|
// time since the start of the request. Only with Go1.9+, those times are
|
||||||
|
// guaranteed to never be negative. (Earlier Go versions are not using a
|
||||||
|
// monotonic clock.) Note that partitioning of Histograms is expensive and
|
||||||
|
// should be used judiciously.
|
||||||
|
//
|
||||||
|
// For hook functions that receive an error as an argument, no observations are
|
||||||
|
// made in the event of a non-nil error value.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
trace := &httptrace.ClientTrace{
|
||||||
|
GotConn: func(_ httptrace.GotConnInfo) {
|
||||||
|
if it.GotConn != nil {
|
||||||
|
it.GotConn(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
PutIdleConn: func(err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.PutIdleConn != nil {
|
||||||
|
it.PutIdleConn(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DNSStart: func(_ httptrace.DNSStartInfo) {
|
||||||
|
if it.DNSStart != nil {
|
||||||
|
it.DNSStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
||||||
|
if it.DNSDone != nil {
|
||||||
|
it.DNSDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConnectStart: func(_, _ string) {
|
||||||
|
if it.ConnectStart != nil {
|
||||||
|
it.ConnectStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConnectDone: func(_, _ string, err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.ConnectDone != nil {
|
||||||
|
it.ConnectDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
GotFirstResponseByte: func() {
|
||||||
|
if it.GotFirstResponseByte != nil {
|
||||||
|
it.GotFirstResponseByte(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Got100Continue: func() {
|
||||||
|
if it.Got100Continue != nil {
|
||||||
|
it.Got100Continue(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
TLSHandshakeStart: func() {
|
||||||
|
if it.TLSHandshakeStart != nil {
|
||||||
|
it.TLSHandshakeStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.TLSHandshakeDone != nil {
|
||||||
|
it.TLSHandshakeDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
WroteHeaders: func() {
|
||||||
|
if it.WroteHeaders != nil {
|
||||||
|
it.WroteHeaders(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Wait100Continue: func() {
|
||||||
|
if it.Wait100Continue != nil {
|
||||||
|
it.Wait100Continue(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
||||||
|
if it.WroteRequest != nil {
|
||||||
|
it.WroteRequest(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
|
||||||
|
|
||||||
|
return next.RoundTrip(r)
|
||||||
|
})
|
||||||
|
}
|
447 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (generated, vendored, new file)
@@ -0,0 +1,447 @@
|
|||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
|
||||||
|
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
|
||||||
|
|
||||||
|
// InstrumentHandlerInFlight is a middleware that wraps the provided
|
||||||
|
// http.Handler. It sets the provided prometheus.Gauge to the number of
|
||||||
|
// requests currently handled by the wrapped http.Handler.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
g.Inc()
|
||||||
|
defer g.Dec()
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerDuration is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe the request duration with the provided ObserverVec.
|
||||||
|
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
|
||||||
|
// those, the only allowed label names are "code" and "method". The function
|
||||||
|
// panics otherwise. The Observe method of the Observer in the ObserverVec is
|
||||||
|
// called with the request duration in seconds. Partitioning happens by HTTP
|
||||||
|
// status code and/or HTTP method if the respective instance label names are
|
||||||
|
// present in the ObserverVec. For unpartitioned observations, use an
|
||||||
|
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||||
|
// expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, no values are reported.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||||
|
code, method := checkLabels(obs)
|
||||||
|
|
||||||
|
if code {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
now := time.Now()
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
|
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
now := time.Now()
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
|
||||||
|
// to observe the request result with the provided CounterVec. The CounterVec
|
||||||
|
// must have zero, one, or two non-const non-curried labels. For those, the only
|
||||||
|
// allowed label names are "code" and "method". The function panics
|
||||||
|
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
|
||||||
|
// HTTP method if the respective instance label names are present in the
|
||||||
|
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, the Counter is not incremented.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
|
||||||
|
code, method := checkLabels(counter)
|
||||||
|
|
||||||
|
if code {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
counter.With(labels(code, method, r.Method, d.Status())).Inc()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
counter.With(labels(code, method, r.Method, 0)).Inc()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe with the provided ObserverVec the request duration
|
||||||
|
// until the response headers are written. The ObserverVec must have zero, one,
|
||||||
|
// or two non-const non-curried labels. For those, the only allowed label names
|
||||||
|
// are "code" and "method". The function panics otherwise. The Observe method of
|
||||||
|
// the Observer in the ObserverVec is called with the request duration in
|
||||||
|
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||||
|
// respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics before calling WriteHeader, no value is
|
||||||
|
// reported.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
	code, method := checkLabels(obs)

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()
		d := newDelegator(w, func(status int) {
			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
		})
		next.ServeHTTP(d, r)
	})
}

// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
	code, method := checkLabels(obs)

	if code {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			d := newDelegator(w, nil)
			next.ServeHTTP(d, r)
			size := computeApproximateRequestSize(r)
			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
		})
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)
		size := computeApproximateRequestSize(r)
		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
	})
}

// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
	code, method := checkLabels(obs)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		d := newDelegator(w, nil)
		next.ServeHTTP(d, r)
		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
	})
}

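The three middlewares above compose by wrapping one another. Below is a minimal usage sketch, not part of this diff: the metric names, buckets, and routes are placeholders, and an ObserverVec with zero labels is used where no partitioning is wanted.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Response sizes partitioned by status code and method.
	respSize := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "api_response_size_bytes",
			Help:    "Response sizes for /api requests.",
			Buckets: prometheus.ExponentialBuckets(100, 10, 6),
		},
		[]string{"code", "method"},
	)
	// Unpartitioned request sizes: a vec with zero labels.
	reqSize := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "api_request_size_bytes",
			Help:    "Request sizes for /api requests.",
			Buckets: prometheus.ExponentialBuckets(100, 10, 6),
		},
		[]string{},
	)
	// Time to first header byte, partitioned by method only.
	ttwh := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "api_time_to_write_header_seconds",
			Help:    "Time until the response headers are written.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(respSize, reqSize, ttwh)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Each middleware wraps the next handler in the chain.
	chain := promhttp.InstrumentHandlerResponseSize(respSize,
		promhttp.InstrumentHandlerRequestSize(reqSize,
			promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, api),
		),
	)

	http.Handle("/api", chain)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
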
func checkLabels(c prometheus.Collector) (code bool, method bool) {
	// TODO(beorn7): Remove this hacky way to check for instance labels
	// once Descriptors can have their dimensionality queried.
	var (
		desc *prometheus.Desc
		m    prometheus.Metric
		pm   dto.Metric
		lvs  []string
	)

	// Get the Desc from the Collector.
	descc := make(chan *prometheus.Desc, 1)
	c.Describe(descc)

	select {
	case desc = <-descc:
	default:
		panic("no description provided by collector")
	}
	select {
	case <-descc:
		panic("more than one description provided by collector")
	default:
	}

	close(descc)

	// Create a ConstMetric with the Desc. Since we don't know how many
	// variable labels there are, try for as long as it needs.
	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
	}

	// Write out the metric into a proto message and look at the labels.
	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
	// If the label is curried, it doesn't interest us.
	// In all other cases, only "code" or "method" is allowed.
	if err := m.Write(&pm); err != nil {
		panic("error checking metric for labels")
	}
	for _, label := range pm.Label {
		name, value := label.GetName(), label.GetValue()
		if value != magicString || isLabelCurried(c, name) {
			continue
		}
		switch name {
		case "code":
			code = true
		case "method":
			method = true
		default:
			panic("metric partitioned with non-supported labels")
		}
	}
	return
}

func isLabelCurried(c prometheus.Collector, label string) bool {
	// This is even hackier than the label test above.
	// We essentially try to curry again and see if it works.
	// But for that, we need to type-convert to the two
	// types we use here, ObserverVec or *CounterVec.
	switch v := c.(type) {
	case *prometheus.CounterVec:
		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
			return false
		}
	case prometheus.ObserverVec:
		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
			return false
		}
	default:
		panic("unsupported metric vec type")
	}
	return true
}

// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}

func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
	if !(code || method) {
		return emptyLabels
	}
	labels := prometheus.Labels{}

	if code {
		labels["code"] = sanitizeCode(status)
	}
	if method {
		labels["method"] = sanitizeMethod(reqMethod)
	}

	return labels
}

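Because checkLabels skips curried labels via isLabelCurried, an ObserverVec may carry additional labels as long as they are curried away before being handed to a middleware. A rough sketch of that pattern follows (reusing the imports from the sketch above; the metric and route names are placeholders, not part of this diff):

	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request latencies.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method", "handler"}, // "handler" must be curried away below
	)
	prometheus.MustRegister(duration)

	// Currying fixes "handler", so checkLabels only sees "code" and "method".
	pushHandler := promhttp.InstrumentHandlerDuration(
		duration.MustCurryWith(prometheus.Labels{"handler": "push"}),
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("pushed"))
		}),
	)
	http.Handle("/push", pushHandler)
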
func computeApproximateRequestSize(r *http.Request) int {
	s := 0
	if r.URL != nil {
		s += len(r.URL.String())
	}

	s += len(r.Method)
	s += len(r.Proto)
	for name, values := range r.Header {
		s += len(name)
		for _, value := range values {
			s += len(value)
		}
	}
	s += len(r.Host)

	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.

	if r.ContentLength != -1 {
		s += int(r.ContentLength)
	}
	return s
}

func sanitizeMethod(m string) string {
	switch m {
	case "GET", "get":
		return "get"
	case "PUT", "put":
		return "put"
	case "HEAD", "head":
		return "head"
	case "POST", "post":
		return "post"
	case "DELETE", "delete":
		return "delete"
	case "CONNECT", "connect":
		return "connect"
	case "OPTIONS", "options":
		return "options"
	case "NOTIFY", "notify":
		return "notify"
	default:
		return strings.ToLower(m)
	}
}

// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
	switch s {
	case 100:
		return "100"
	case 101:
		return "101"

	case 200, 0:
		return "200"
	case 201:
		return "201"
	case 202:
		return "202"
	case 203:
		return "203"
	case 204:
		return "204"
	case 205:
		return "205"
	case 206:
		return "206"

	case 300:
		return "300"
	case 301:
		return "301"
	case 302:
		return "302"
	case 304:
		return "304"
	case 305:
		return "305"
	case 307:
		return "307"

	case 400:
		return "400"
	case 401:
		return "401"
	case 402:
		return "402"
	case 403:
		return "403"
	case 404:
		return "404"
	case 405:
		return "405"
	case 406:
		return "406"
	case 407:
		return "407"
	case 408:
		return "408"
	case 409:
		return "409"
	case 410:
		return "410"
	case 411:
		return "411"
	case 412:
		return "412"
	case 413:
		return "413"
	case 414:
		return "414"
	case 415:
		return "415"
	case 416:
		return "416"
	case 417:
		return "417"
	case 418:
		return "418"

	case 500:
		return "500"
	case 501:
		return "501"
	case 502:
		return "502"
	case 503:
		return "503"
	case 504:
		return "504"
	case 505:
		return "505"

	case 428:
		return "428"
	case 429:
		return "429"
	case 431:
		return "431"
	case 511:
		return "511"

	default:
		return strconv.Itoa(s)
	}
}

666 vendor/github.com/prometheus/client_golang/prometheus/registry.go generated vendored
@@ -15,15 +15,22 @@ package prometheus
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
+	"io/ioutil"
 	"os"
+	"path/filepath"
+	"runtime"
 	"sort"
+	"strings"
 	"sync"
+	"unicode/utf8"
 
 	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/expfmt"
 
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
 )
 
 const (
@@ -35,13 +42,14 @@ const (
 // DefaultRegisterer and DefaultGatherer are the implementations of the
 // Registerer and Gatherer interface a number of convenience functions in this
 // package act on. Initially, both variables point to the same Registry, which
-// has a process collector (see NewProcessCollector) and a Go collector (see
-// NewGoCollector) already registered. This approach to keep default instances
-// as global state mirrors the approach of other packages in the Go standard
-// library. Note that there are caveats. Change the variables with caution and
-// only if you understand the consequences. Users who want to avoid global state
-// altogether should not use the convenience function and act on custom
-// instances instead.
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
 var (
 	defaultRegistry   = NewRegistry()
 	DefaultRegisterer Registerer = defaultRegistry
@@ -49,7 +57,7 @@ var (
 )
 
 func init() {
-	MustRegister(NewProcessCollector(os.Getpid(), ""))
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
 	MustRegister(NewGoCollector())
 }
 
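The init() change above reflects the v0.9.x API: NewProcessCollector now takes a ProcessCollectorOpts struct instead of a pid and namespace. A minimal sketch of how a caller of the library adapts after this upgrade (registry, port, and metrics path are placeholders, not part of this diff):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of the global default one.
	reg := prometheus.NewRegistry()

	// v0.9.x signature: an options struct instead of (pid, namespace).
	reg.MustRegister(
		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
		prometheus.NewGoCollector(),
	)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9100", nil)
}
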
@ -65,7 +73,8 @@ func NewRegistry() *Registry {
|
|||||||
|
|
||||||
// NewPedanticRegistry returns a registry that checks during collection if each
|
// NewPedanticRegistry returns a registry that checks during collection if each
|
||||||
// collected Metric is consistent with its reported Desc, and if the Desc has
|
// collected Metric is consistent with its reported Desc, and if the Desc has
|
||||||
// actually been registered with the registry.
|
// actually been registered with the registry. Unchecked Collectors (those whose
|
||||||
|
// Describe methed does not yield any descriptors) are excluded from the check.
|
||||||
//
|
//
|
||||||
// Usually, a Registry will be happy as long as the union of all collected
|
// Usually, a Registry will be happy as long as the union of all collected
|
||||||
// Metrics is consistent and valid even if some metrics are not consistent with
|
// Metrics is consistent and valid even if some metrics are not consistent with
|
||||||
@ -80,7 +89,7 @@ func NewPedanticRegistry() *Registry {
|
|||||||
|
|
||||||
// Registerer is the interface for the part of a registry in charge of
|
// Registerer is the interface for the part of a registry in charge of
|
||||||
// registering and unregistering. Users of custom registries should use
|
// registering and unregistering. Users of custom registries should use
|
||||||
// Registerer as type for registration purposes (rather then the Registry type
|
// Registerer as type for registration purposes (rather than the Registry type
|
||||||
// directly). In that way, they are free to use custom Registerer implementation
|
// directly). In that way, they are free to use custom Registerer implementation
|
||||||
// (e.g. for testing purposes).
|
// (e.g. for testing purposes).
|
||||||
type Registerer interface {
|
type Registerer interface {
|
||||||
@ -95,8 +104,13 @@ type Registerer interface {
|
|||||||
// returned error is an instance of AlreadyRegisteredError, which
|
// returned error is an instance of AlreadyRegisteredError, which
|
||||||
// contains the previously registered Collector.
|
// contains the previously registered Collector.
|
||||||
//
|
//
|
||||||
// It is in general not safe to register the same Collector multiple
|
// A Collector whose Describe method does not yield any Desc is treated
|
||||||
// times concurrently.
|
// as unchecked. Registration will always succeed. No check for
|
||||||
|
// re-registering (see previous paragraph) is performed. Thus, the
|
||||||
|
// caller is responsible for not double-registering the same unchecked
|
||||||
|
// Collector, and for providing a Collector that will not cause
|
||||||
|
// inconsistent metrics on collection. (This would lead to scrape
|
||||||
|
// errors.)
|
||||||
Register(Collector) error
|
Register(Collector) error
|
||||||
// MustRegister works like Register but registers any number of
|
// MustRegister works like Register but registers any number of
|
||||||
// Collectors and panics upon the first registration that causes an
|
// Collectors and panics upon the first registration that causes an
|
||||||
@ -105,7 +119,9 @@ type Registerer interface {
|
|||||||
// Unregister unregisters the Collector that equals the Collector passed
|
// Unregister unregisters the Collector that equals the Collector passed
|
||||||
// in as an argument. (Two Collectors are considered equal if their
|
// in as an argument. (Two Collectors are considered equal if their
|
||||||
// Describe method yields the same set of descriptors.) The function
|
// Describe method yields the same set of descriptors.) The function
|
||||||
// returns whether a Collector was unregistered.
|
// returns whether a Collector was unregistered. Note that an unchecked
|
||||||
|
// Collector cannot be unregistered (as its Describe method does not
|
||||||
|
// yield any descriptor).
|
||||||
//
|
//
|
||||||
// Note that even after unregistering, it will not be possible to
|
// Note that even after unregistering, it will not be possible to
|
||||||
// register a new Collector that is inconsistent with the unregistered
|
// register a new Collector that is inconsistent with the unregistered
|
||||||
@ -123,15 +139,23 @@ type Registerer interface {
|
|||||||
type Gatherer interface {
|
type Gatherer interface {
|
||||||
// Gather calls the Collect method of the registered Collectors and then
|
// Gather calls the Collect method of the registered Collectors and then
|
||||||
// gathers the collected metrics into a lexicographically sorted slice
|
// gathers the collected metrics into a lexicographically sorted slice
|
||||||
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
|
// of uniquely named MetricFamily protobufs. Gather ensures that the
|
||||||
// to gather as many metrics as possible. Hence, if a non-nil error is
|
// returned slice is valid and self-consistent so that it can be used
|
||||||
// returned, the returned MetricFamily slice could be nil (in case of a
|
// for valid exposition. As an exception to the strict consistency
|
||||||
// fatal error that prevented any meaningful metric collection) or
|
// requirements described for metric.Desc, Gather will tolerate
|
||||||
// contain a number of MetricFamily protobufs, some of which might be
|
// different sets of label names for metrics of the same metric family.
|
||||||
// incomplete, and some might be missing altogether. The returned error
|
//
|
||||||
// (which might be a MultiError) explains the details. In scenarios
|
// Even if an error occurs, Gather attempts to gather as many metrics as
|
||||||
// where complete collection is critical, the returned MetricFamily
|
// possible. Hence, if a non-nil error is returned, the returned
|
||||||
// protobufs should be disregarded if the returned error is non-nil.
|
// MetricFamily slice could be nil (in case of a fatal error that
|
||||||
|
// prevented any meaningful metric collection) or contain a number of
|
||||||
|
// MetricFamily protobufs, some of which might be incomplete, and some
|
||||||
|
// might be missing altogether. The returned error (which might be a
|
||||||
|
// MultiError) explains the details. Note that this is mostly useful for
|
||||||
|
// debugging purposes. If the gathered protobufs are to be used for
|
||||||
|
// exposition in actual monitoring, it is almost always better to not
|
||||||
|
// expose an incomplete result and instead disregard the returned
|
||||||
|
// MetricFamily protobufs in case the returned error is non-nil.
|
||||||
Gather() ([]*dto.MetricFamily, error)
|
Gather() ([]*dto.MetricFamily, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -201,6 +225,13 @@ func (errs MultiError) Error() string {
|
|||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Append appends the provided error if it is not nil.
|
||||||
|
func (errs *MultiError) Append(err error) {
|
||||||
|
if err != nil {
|
||||||
|
*errs = append(*errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
||||||
// contained error as error if len(errs is 1). In all other cases, it returns
|
// contained error as error if len(errs is 1). In all other cases, it returns
|
||||||
// the MultiError directly. This is helpful for returning a MultiError in a way
|
// the MultiError directly. This is helpful for returning a MultiError in a way
|
||||||
@ -225,6 +256,7 @@ type Registry struct {
|
|||||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||||
descIDs map[uint64]struct{}
|
descIDs map[uint64]struct{}
|
||||||
dimHashesByName map[string]uint64
|
dimHashesByName map[string]uint64
|
||||||
|
uncheckedCollectors []Collector
|
||||||
pedanticChecksEnabled bool
|
pedanticChecksEnabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,7 +274,12 @@ func (r *Registry) Register(c Collector) error {
|
|||||||
close(descChan)
|
close(descChan)
|
||||||
}()
|
}()
|
||||||
r.mtx.Lock()
|
r.mtx.Lock()
|
||||||
defer r.mtx.Unlock()
|
defer func() {
|
||||||
|
// Drain channel in case of premature return to not leak a goroutine.
|
||||||
|
for range descChan {
|
||||||
|
}
|
||||||
|
r.mtx.Unlock()
|
||||||
|
}()
|
||||||
// Conduct various tests...
|
// Conduct various tests...
|
||||||
for desc := range descChan {
|
for desc := range descChan {
|
||||||
|
|
||||||
@ -282,9 +319,10 @@ func (r *Registry) Register(c Collector) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Did anything happen at all?
|
// A Collector yielding no Desc at all is considered unchecked.
|
||||||
if len(newDescIDs) == 0 {
|
if len(newDescIDs) == 0 {
|
||||||
return errors.New("collector has no descriptors")
|
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
if existing, exists := r.collectorsByID[collectorID]; exists {
|
||||||
return AlreadyRegisteredError{
|
return AlreadyRegisteredError{
|
||||||
@ -358,31 +396,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
|
|||||||
// Gather implements Gatherer.
|
// Gather implements Gatherer.
|
||||||
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
||||||
var (
|
var (
|
||||||
metricChan = make(chan Metric, capMetricChan)
|
checkedMetricChan = make(chan Metric, capMetricChan)
|
||||||
metricHashes = map[uint64]struct{}{}
|
uncheckedMetricChan = make(chan Metric, capMetricChan)
|
||||||
dimHashes = map[string]uint64{}
|
metricHashes = map[uint64]struct{}{}
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
errs MultiError // The collected errors to return in the end.
|
errs MultiError // The collected errors to return in the end.
|
||||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
||||||
)
|
)
|
||||||
|
|
||||||
r.mtx.RLock()
|
r.mtx.RLock()
|
||||||
|
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
|
||||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
||||||
|
checkedCollectors := make(chan Collector, len(r.collectorsByID))
|
||||||
// Scatter.
|
uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
|
||||||
// (Collectors could be complex and slow, so we call them all at once.)
|
|
||||||
wg.Add(len(r.collectorsByID))
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(metricChan)
|
|
||||||
}()
|
|
||||||
for _, collector := range r.collectorsByID {
|
for _, collector := range r.collectorsByID {
|
||||||
go func(collector Collector) {
|
checkedCollectors <- collector
|
||||||
defer wg.Done()
|
}
|
||||||
collector.Collect(metricChan)
|
for _, collector := range r.uncheckedCollectors {
|
||||||
}(collector)
|
uncheckedCollectors <- collector
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case pedantic checks are enabled, we have to copy the map before
|
// In case pedantic checks are enabled, we have to copy the map before
|
||||||
// giving up the RLock.
|
// giving up the RLock.
|
||||||
if r.pedanticChecksEnabled {
|
if r.pedanticChecksEnabled {
|
||||||
@ -391,127 +423,258 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
|||||||
registeredDescIDs[id] = struct{}{}
|
registeredDescIDs[id] = struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
r.mtx.RUnlock()
|
r.mtx.RUnlock()
|
||||||
|
|
||||||
// Drain metricChan in case of premature return.
|
wg.Add(goroutineBudget)
|
||||||
|
|
||||||
|
collectWorker := func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case collector := <-checkedCollectors:
|
||||||
|
collector.Collect(checkedMetricChan)
|
||||||
|
case collector := <-uncheckedCollectors:
|
||||||
|
collector.Collect(uncheckedMetricChan)
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the first worker now to make sure at least one is running.
|
||||||
|
go collectWorker()
|
||||||
|
goroutineBudget--
|
||||||
|
|
||||||
|
// Close checkedMetricChan and uncheckedMetricChan once all collectors
|
||||||
|
// are collected.
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(checkedMetricChan)
|
||||||
|
close(uncheckedMetricChan)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
|
||||||
defer func() {
|
defer func() {
|
||||||
for range metricChan {
|
if checkedMetricChan != nil {
|
||||||
|
for range checkedMetricChan {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if uncheckedMetricChan != nil {
|
||||||
|
for range uncheckedMetricChan {
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Gather.
|
// Copy the channel references so we can nil them out later to remove
|
||||||
for metric := range metricChan {
|
// them from the select statements below.
|
||||||
// This could be done concurrently, too, but it required locking
|
cmc := checkedMetricChan
|
||||||
// of metricFamiliesByName (and of metricHashes if checks are
|
umc := uncheckedMetricChan
|
||||||
// enabled). Most likely not worth it.
|
|
||||||
desc := metric.Desc()
|
for {
|
||||||
dtoMetric := &dto.Metric{}
|
select {
|
||||||
if err := metric.Write(dtoMetric); err != nil {
|
case metric, ok := <-cmc:
|
||||||
errs = append(errs, fmt.Errorf(
|
if !ok {
|
||||||
"error collecting metric %v: %s", desc, err,
|
cmc = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
errs.Append(processMetric(
|
||||||
|
metric, metricFamiliesByName,
|
||||||
|
metricHashes,
|
||||||
|
registeredDescIDs,
|
||||||
))
|
))
|
||||||
continue
|
case metric, ok := <-umc:
|
||||||
|
if !ok {
|
||||||
|
umc = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
errs.Append(processMetric(
|
||||||
|
metric, metricFamiliesByName,
|
||||||
|
metricHashes,
|
||||||
|
nil,
|
||||||
|
))
|
||||||
|
default:
|
||||||
|
if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
|
||||||
|
// All collectors are already being worked on or
|
||||||
|
// we have already as many goroutines started as
|
||||||
|
// there are collectors. Do the same as above,
|
||||||
|
// just without the default.
|
||||||
|
select {
|
||||||
|
case metric, ok := <-cmc:
|
||||||
|
if !ok {
|
||||||
|
cmc = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
errs.Append(processMetric(
|
||||||
|
metric, metricFamiliesByName,
|
||||||
|
metricHashes,
|
||||||
|
registeredDescIDs,
|
||||||
|
))
|
||||||
|
case metric, ok := <-umc:
|
||||||
|
if !ok {
|
||||||
|
umc = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
errs.Append(processMetric(
|
||||||
|
metric, metricFamiliesByName,
|
||||||
|
metricHashes,
|
||||||
|
nil,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Start more workers.
|
||||||
|
go collectWorker()
|
||||||
|
goroutineBudget--
|
||||||
|
runtime.Gosched()
|
||||||
}
|
}
|
||||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
// Once both checkedMetricChan and uncheckdMetricChan are closed
|
||||||
if ok {
|
// and drained, the contraption above will nil out cmc and umc,
|
||||||
if metricFamily.GetHelp() != desc.help {
|
// and then we can leave the collect loop here.
|
||||||
errs = append(errs, fmt.Errorf(
|
if cmc == nil && umc == nil {
|
||||||
"collected metric %s %s has help %q but should have %q",
|
break
|
||||||
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO(beorn7): Simplify switch once Desc has type.
|
|
||||||
switch metricFamily.GetType() {
|
|
||||||
case dto.MetricType_COUNTER:
|
|
||||||
if dtoMetric.Counter == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Counter",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_GAUGE:
|
|
||||||
if dtoMetric.Gauge == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Gauge",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_SUMMARY:
|
|
||||||
if dtoMetric.Summary == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Summary",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_UNTYPED:
|
|
||||||
if dtoMetric.Untyped == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be Untyped",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_HISTOGRAM:
|
|
||||||
if dtoMetric.Histogram == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Histogram",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("encountered MetricFamily with invalid type")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
metricFamily = &dto.MetricFamily{}
|
|
||||||
metricFamily.Name = proto.String(desc.fqName)
|
|
||||||
metricFamily.Help = proto.String(desc.help)
|
|
||||||
// TODO(beorn7): Simplify switch once Desc has type.
|
|
||||||
switch {
|
|
||||||
case dtoMetric.Gauge != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
|
||||||
case dtoMetric.Counter != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
|
||||||
case dtoMetric.Summary != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
|
||||||
case dtoMetric.Untyped != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
|
||||||
case dtoMetric.Histogram != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
|
||||||
default:
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"empty metric collected: %s", dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
metricFamiliesByName[desc.fqName] = metricFamily
|
|
||||||
}
|
}
|
||||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if r.pedanticChecksEnabled {
|
|
||||||
// Is the desc registered at all?
|
|
||||||
if _, exist := registeredDescIDs[desc.id]; !exist {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s with unregistered descriptor %s",
|
|
||||||
metricFamily.GetName(), dtoMetric, desc,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
|
||||||
}
|
}
|
||||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
|
||||||
|
// Prometheus text format, and writes it to a temporary file. Upon success, the
|
||||||
|
// temporary file is renamed to the provided filename.
|
||||||
|
//
|
||||||
|
// This is intended for use with the textfile collector of the node exporter.
|
||||||
|
// Note that the node exporter expects the filename to be suffixed with ".prom".
|
||||||
|
func WriteToTextfile(filename string, g Gatherer) error {
|
||||||
|
tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer os.Remove(tmp.Name())
|
||||||
|
|
||||||
|
mfs, err := g.Gather()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, mf := range mfs {
|
||||||
|
if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tmp.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Chmod(tmp.Name(), 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Rename(tmp.Name(), filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
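WriteToTextfile, added in this version, targets batch jobs that expose metrics through the node exporter's textfile collector rather than over HTTP. A rough usage sketch follows; the gauge, registry, and output path are placeholders, and only WriteToTextfile itself comes from this diff:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_job_queue_depth",
		Help: "Items left in the queue when the batch job finished.",
	})
	reg := prometheus.NewRegistry()
	reg.MustRegister(queueDepth)

	queueDepth.Set(42)

	// The node exporter's textfile collector picks up *.prom files
	// from its configured directory (path here is an assumption).
	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/batch_job.prom", reg); err != nil {
		log.Fatal(err)
	}
}
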
// processMetric is an internal helper method only used by the Gather method.
|
||||||
|
func processMetric(
|
||||||
|
metric Metric,
|
||||||
|
metricFamiliesByName map[string]*dto.MetricFamily,
|
||||||
|
metricHashes map[uint64]struct{},
|
||||||
|
registeredDescIDs map[uint64]struct{},
|
||||||
|
) error {
|
||||||
|
desc := metric.Desc()
|
||||||
|
// Wrapped metrics collected by an unchecked Collector can have an
|
||||||
|
// invalid Desc.
|
||||||
|
if desc.err != nil {
|
||||||
|
return desc.err
|
||||||
|
}
|
||||||
|
dtoMetric := &dto.Metric{}
|
||||||
|
if err := metric.Write(dtoMetric); err != nil {
|
||||||
|
return fmt.Errorf("error collecting metric %v: %s", desc, err)
|
||||||
|
}
|
||||||
|
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
||||||
|
if ok { // Existing name.
|
||||||
|
if metricFamily.GetHelp() != desc.help {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s has help %q but should have %q",
|
||||||
|
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
// TODO(beorn7): Simplify switch once Desc has type.
|
||||||
|
switch metricFamily.GetType() {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
if dtoMetric.Counter == nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s should be a Counter",
|
||||||
|
desc.fqName, dtoMetric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
if dtoMetric.Gauge == nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s should be a Gauge",
|
||||||
|
desc.fqName, dtoMetric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
if dtoMetric.Summary == nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s should be a Summary",
|
||||||
|
desc.fqName, dtoMetric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
if dtoMetric.Untyped == nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s should be Untyped",
|
||||||
|
desc.fqName, dtoMetric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
if dtoMetric.Histogram == nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s should be a Histogram",
|
||||||
|
desc.fqName, dtoMetric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic("encountered MetricFamily with invalid type")
|
||||||
|
}
|
||||||
|
} else { // New name.
|
||||||
|
metricFamily = &dto.MetricFamily{}
|
||||||
|
metricFamily.Name = proto.String(desc.fqName)
|
||||||
|
metricFamily.Help = proto.String(desc.help)
|
||||||
|
// TODO(beorn7): Simplify switch once Desc has type.
|
||||||
|
switch {
|
||||||
|
case dtoMetric.Gauge != nil:
|
||||||
|
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
||||||
|
case dtoMetric.Counter != nil:
|
||||||
|
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
||||||
|
case dtoMetric.Summary != nil:
|
||||||
|
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
||||||
|
case dtoMetric.Untyped != nil:
|
||||||
|
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
||||||
|
case dtoMetric.Histogram != nil:
|
||||||
|
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("empty metric collected: %s", dtoMetric)
|
||||||
|
}
|
||||||
|
if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
metricFamiliesByName[desc.fqName] = metricFamily
|
||||||
|
}
|
||||||
|
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if registeredDescIDs != nil {
|
||||||
|
// Is the desc registered at all?
|
||||||
|
if _, exist := registeredDescIDs[desc.id]; !exist {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %s %s with unregistered descriptor %s",
|
||||||
|
metricFamily.GetName(), dtoMetric, desc,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gatherers is a slice of Gatherer instances that implements the Gatherer
|
// Gatherers is a slice of Gatherer instances that implements the Gatherer
|
||||||
@ -537,7 +700,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||||||
var (
|
var (
|
||||||
metricFamiliesByName = map[string]*dto.MetricFamily{}
|
metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||||
metricHashes = map[uint64]struct{}{}
|
metricHashes = map[uint64]struct{}{}
|
||||||
dimHashes = map[string]uint64{}
|
|
||||||
errs MultiError // The collected errors to return in the end.
|
errs MultiError // The collected errors to return in the end.
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -574,10 +736,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||||||
existingMF.Name = mf.Name
|
existingMF.Name = mf.Name
|
||||||
existingMF.Help = mf.Help
|
existingMF.Help = mf.Help
|
||||||
existingMF.Type = mf.Type
|
existingMF.Type = mf.Type
|
||||||
|
if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
metricFamiliesByName[mf.GetName()] = existingMF
|
metricFamiliesByName[mf.GetName()] = existingMF
|
||||||
}
|
}
|
||||||
for _, m := range mf.Metric {
|
for _, m := range mf.Metric {
|
||||||
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
|
if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -585,88 +751,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
// metricSorter is a sortable slice of *dto.Metric.
|
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
|
||||||
type metricSorter []*dto.Metric
|
// Prometheus text format and the internal metric representation of the
|
||||||
|
// Prometheus server add while flattening Summaries and Histograms.
|
||||||
func (s metricSorter) Len() int {
|
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
|
||||||
return len(s)
|
var (
|
||||||
}
|
newName = mf.GetName()
|
||||||
|
newType = mf.GetType()
|
||||||
func (s metricSorter) Swap(i, j int) {
|
newNameWithoutSuffix = ""
|
||||||
s[i], s[j] = s[j], s[i]
|
)
|
||||||
}
|
switch {
|
||||||
|
case strings.HasSuffix(newName, "_count"):
|
||||||
func (s metricSorter) Less(i, j int) bool {
|
newNameWithoutSuffix = newName[:len(newName)-6]
|
||||||
if len(s[i].Label) != len(s[j].Label) {
|
case strings.HasSuffix(newName, "_sum"):
|
||||||
// This should not happen. The metrics are
|
newNameWithoutSuffix = newName[:len(newName)-4]
|
||||||
// inconsistent. However, we have to deal with the fact, as
|
case strings.HasSuffix(newName, "_bucket"):
|
||||||
// people might use custom collectors or metric family injection
|
newNameWithoutSuffix = newName[:len(newName)-7]
|
||||||
// to create inconsistent metrics. So let's simply compare the
|
|
||||||
// number of labels in this case. That will still yield
|
|
||||||
// reproducible sorting.
|
|
||||||
return len(s[i].Label) < len(s[j].Label)
|
|
||||||
}
|
}
|
||||||
for n, lp := range s[i].Label {
|
if newNameWithoutSuffix != "" {
|
||||||
vi := lp.GetValue()
|
if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
|
||||||
vj := s[j].Label[n].GetValue()
|
switch existingMF.GetType() {
|
||||||
if vi != vj {
|
case dto.MetricType_SUMMARY:
|
||||||
return vi < vj
|
if !strings.HasSuffix(newName, "_bucket") {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric named %q collides with previously collected summary named %q",
|
||||||
|
newName, newNameWithoutSuffix,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric named %q collides with previously collected histogram named %q",
|
||||||
|
newName, newNameWithoutSuffix,
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
|
||||||
// We should never arrive here. Multiple metrics with the same
|
if _, ok := mfs[newName+"_count"]; ok {
|
||||||
// label set in the same scrape will lead to undefined ingestion
|
return fmt.Errorf(
|
||||||
// behavior. However, as above, we have to provide stable sorting
|
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||||
// here, even for inconsistent metrics. So sort equal metrics
|
newName, newName+"_count",
|
||||||
// by their timestamp, with missing timestamps (implying "now")
|
)
|
||||||
// coming last.
|
}
|
||||||
if s[i].TimestampMs == nil {
|
if _, ok := mfs[newName+"_sum"]; ok {
|
||||||
return false
|
return fmt.Errorf(
|
||||||
}
|
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||||
if s[j].TimestampMs == nil {
|
newName, newName+"_sum",
|
||||||
return true
|
)
|
||||||
}
|
|
||||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeMetricFamilies returns a MetricFamily slice with empty
|
|
||||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
|
||||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
|
||||||
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
|
||||||
for _, mf := range metricFamiliesByName {
|
|
||||||
sort.Sort(metricSorter(mf.Metric))
|
|
||||||
}
|
|
||||||
names := make([]string, 0, len(metricFamiliesByName))
|
|
||||||
for name, mf := range metricFamiliesByName {
|
|
||||||
if len(mf.Metric) > 0 {
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sort.Strings(names)
|
if newType == dto.MetricType_HISTOGRAM {
|
||||||
result := make([]*dto.MetricFamily, 0, len(names))
|
if _, ok := mfs[newName+"_bucket"]; ok {
|
||||||
for _, name := range names {
|
return fmt.Errorf(
|
||||||
result = append(result, metricFamiliesByName[name])
|
"collected histogram named %q collides with previously collected metric named %q",
|
||||||
|
newName, newName+"_bucket",
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return result
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkMetricConsistency checks if the provided Metric is consistent with the
|
// checkMetricConsistency checks if the provided Metric is consistent with the
|
||||||
// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
|
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
|
||||||
// name. If the resulting hash is alread in the provided metricHashes, an error
|
// name. If the resulting hash is already in the provided metricHashes, an error
|
||||||
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
|
// is returned. If not, it is added to metricHashes.
|
||||||
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
|
|
||||||
// doesn't yet contain a hash for the provided MetricFamily, it is
|
|
||||||
// added. Otherwise, an error is returned if the existing dimHashes in not equal
|
|
||||||
// the calculated dimHash.
|
|
||||||
func checkMetricConsistency(
|
func checkMetricConsistency(
|
||||||
metricFamily *dto.MetricFamily,
|
metricFamily *dto.MetricFamily,
|
||||||
dtoMetric *dto.Metric,
|
dtoMetric *dto.Metric,
|
||||||
metricHashes map[uint64]struct{},
|
metricHashes map[uint64]struct{},
|
||||||
dimHashes map[string]uint64,
|
|
||||||
) error {
|
) error {
|
||||||
|
name := metricFamily.GetName()
|
||||||
|
|
||||||
// Type consistency with metric family.
|
// Type consistency with metric family.
|
||||||
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
||||||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
||||||
@ -674,41 +832,59 @@ func checkMetricConsistency(
|
|||||||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
|
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
|
||||||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
"collected metric %s %s is not a %s",
|
"collected metric %q { %s} is not a %s",
|
||||||
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
|
name, dtoMetric, metricFamily.GetType(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
|
previousLabelName := ""
|
||||||
|
for _, labelPair := range dtoMetric.GetLabel() {
|
||||||
|
labelName := labelPair.GetName()
|
||||||
|
if labelName == previousLabelName {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %q { %s} has two or more labels with the same name: %s",
|
||||||
|
name, dtoMetric, labelName,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if !checkLabelName(labelName) {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %q { %s} has a label with an invalid name: %s",
|
||||||
|
name, dtoMetric, labelName,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if dtoMetric.Summary != nil && labelName == quantileLabel {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %q { %s} must not have an explicit %q label",
|
||||||
|
name, dtoMetric, quantileLabel,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if !utf8.ValidString(labelPair.GetValue()) {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
|
||||||
|
name, dtoMetric, labelName, labelPair.GetValue())
|
||||||
|
}
|
||||||
|
previousLabelName = labelName
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
|
||||||
h := hashNew()
|
h := hashNew()
|
||||||
h = hashAdd(h, metricFamily.GetName())
|
h = hashAdd(h, name)
|
||||||
h = hashAddByte(h, separatorByte)
|
h = hashAddByte(h, separatorByte)
|
||||||
dh := hashNew()
|
|
||||||
// Make sure label pairs are sorted. We depend on it for the consistency
|
// Make sure label pairs are sorted. We depend on it for the consistency
|
||||||
// check.
|
// check.
|
||||||
sort.Sort(LabelPairSorter(dtoMetric.Label))
|
sort.Sort(labelPairSorter(dtoMetric.Label))
|
||||||
for _, lp := range dtoMetric.Label {
|
for _, lp := range dtoMetric.Label {
|
||||||
|
h = hashAdd(h, lp.GetName())
|
||||||
|
h = hashAddByte(h, separatorByte)
|
||||||
h = hashAdd(h, lp.GetValue())
|
h = hashAdd(h, lp.GetValue())
|
||||||
h = hashAddByte(h, separatorByte)
|
h = hashAddByte(h, separatorByte)
|
||||||
dh = hashAdd(dh, lp.GetName())
|
|
||||||
dh = hashAddByte(dh, separatorByte)
|
|
||||||
}
|
}
|
||||||
if _, exists := metricHashes[h]; exists {
|
if _, exists := metricHashes[h]; exists {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
"collected metric %s %s was collected before with the same name and label values",
|
"collected metric %q { %s} was collected before with the same name and label values",
|
||||||
metricFamily.GetName(), dtoMetric,
|
name, dtoMetric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
|
|
||||||
if dimHash != dh {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
|
|
||||||
metricFamily.GetName(), dtoMetric,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dimHashes[metricFamily.GetName()] = dh
|
|
||||||
}
|
|
||||||
metricHashes[h] = struct{}{}
|
metricHashes[h] = struct{}{}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -740,7 +916,7 @@ func checkDescConsistency(
|
|||||||
metricFamily.GetName(), dtoMetric, desc,
|
metricFamily.GetName(), dtoMetric, desc,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
sort.Sort(LabelPairSorter(lpsFromDesc))
|
sort.Sort(labelPairSorter(lpsFromDesc))
|
||||||
for i, lpFromDesc := range lpsFromDesc {
|
for i, lpFromDesc := range lpsFromDesc {
|
||||||
lpFromMetric := dtoMetric.Label[i]
|
lpFromMetric := dtoMetric.Label[i]
|
||||||
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
||||||
|
177 vendor/github.com/prometheus/client_golang/prometheus/summary.go generated vendored
@ -36,7 +36,10 @@ const quantileLabel = "quantile"
|
|||||||
//
|
//
|
||||||
// A typical use-case is the observation of request latencies. By default, a
|
// A typical use-case is the observation of request latencies. By default, a
|
||||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||||
// as rank estimations.
|
// as rank estimations. However, the default behavior will change in the
|
||||||
|
// upcoming v0.10 of the library. There will be no rank estimations at all by
|
||||||
|
// default. For a sane transition, it is recommended to set the desired rank
|
||||||
|
// estimations explicitly.
|
||||||
//
|
//
|
||||||
// Note that the rank estimations cannot be aggregated in a meaningful way with
|
// Note that the rank estimations cannot be aggregated in a meaningful way with
|
||||||
// the Prometheus query language (i.e. you cannot average or add them). If you
|
// the Prometheus query language (i.e. you cannot average or add them). If you
|
||||||
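Following the recommendation above, a caller would spell out the objectives that v0.9.x still applies by default, so behavior does not change once the default goes away. A short sketch (the metric name is a placeholder and the usual prometheus package import is assumed):

	// Explicitly request the quantiles that are currently the default.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_latency_seconds",
		Help:       "Request latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.21)
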
@ -78,8 +81,10 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
// mandatory to set Name to a non-empty string. While all other fields are
|
||||||
// optional and can safely be left at their zero value.
|
// optional and can safely be left at their zero value, it is recommended to set
|
||||||
|
// a help string and to explicitly set the Objectives field to the desired value
|
||||||
|
// as the default value will change in the upcoming v0.10 of the library.
|
||||||
type SummaryOpts struct {
|
type SummaryOpts struct {
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
// name of the Summary (created by joining these components with
|
// name of the Summary (created by joining these components with
|
||||||
@ -90,29 +95,27 @@ type SummaryOpts struct {
|
|||||||
Subsystem string
|
Subsystem string
|
||||||
Name string
|
Name string
|
||||||
|
|
||||||
// Help provides information about this Summary. Mandatory!
|
// Help provides information about this Summary.
|
||||||
//
|
//
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
// Metrics with the same fully-qualified name must have the same Help
|
||||||
// string.
|
// string.
|
||||||
Help string
|
Help string
|
||||||
|
|
||||||
// ConstLabels are used to attach fixed labels to this
|
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||||
// Summary. Summaries with the same fully-qualified name must have the
|
// with the same fully-qualified name must have the same label names in
|
||||||
// same label names in their ConstLabels.
|
// their ConstLabels.
|
||||||
//
|
//
|
||||||
// Note that in most cases, labels have a value that varies during the
|
// Due to the way a Summary is represented in the Prometheus text format
|
||||||
// lifetime of a process. Those labels are usually managed with a
|
// and how it is handled by the Prometheus server internally, “quantile”
|
||||||
// SummaryVec. ConstLabels serve only special purposes. One is for the
|
// is an illegal label name. Construction of a Summary or SummaryVec
|
||||||
// special case where the value of a label does not change during the
|
// will panic if this label name is used in ConstLabels.
|
||||||
// lifetime of a process, e.g. if the revision of the running binary is
|
|
||||||
// put into a label. Another, more advanced purpose is if more than one
|
|
||||||
// Collector needs to collect Summaries with the same fully-qualified
|
|
||||||
// name. In that case, those Summaries must differ in the values of
|
|
||||||
// their ConstLabels. See the Collector examples.
|
|
||||||
//
|
//
|
||||||
// If the value of a label never changes (not even between binaries),
|
// ConstLabels are only used rarely. In particular, do not use them to
|
||||||
// that label most likely should not be a label at all (but part of the
|
// attach the same labels to all your metrics. Those use cases are
|
||||||
// metric name).
|
// better covered by target labels set by the scraping Prometheus
|
||||||
|
// server, or by one specific metric (e.g. a build_info or a
|
||||||
|
// machine_role metric). See also
|
||||||
|
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||||
ConstLabels Labels
|
ConstLabels Labels
|
||||||
|
|
||||||
// Objectives defines the quantile rank estimates with their respective
|
// Objectives defines the quantile rank estimates with their respective
|
||||||
@ -178,7 +181,7 @@ func NewSummary(opts SummaryOpts) Summary {
|
|||||||
|
|
||||||
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
if len(desc.variableLabels) != len(labelValues) {
|
||||||
panic(errInconsistentCardinality)
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
for _, n := range desc.variableLabels {
|
||||||
@ -399,13 +402,21 @@ func (s quantSort) Less(i, j int) bool {
|
|||||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||||
// instances with NewSummaryVec.
|
// instances with NewSummaryVec.
|
||||||
type SummaryVec struct {
|
type SummaryVec struct {
|
||||||
*MetricVec
|
*metricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||||
// partitioned by the given label names. At least one label name must be
|
// partitioned by the given label names.
|
||||||
// provided.
|
//
|
||||||
|
// Due to the way a Summary is represented in the Prometheus text format and how
|
||||||
|
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||||
|
// label name. NewSummaryVec will panic if this label name is used.
|
||||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||||
|
for _, ln := range labelNames {
|
||||||
|
if ln == quantileLabel {
|
||||||
|
panic(errQuantileLabelNotAllowed)
|
||||||
|
}
|
||||||
|
}
|
||||||
desc := NewDesc(
|
desc := NewDesc(
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
opts.Help,
|
opts.Help,
|
||||||
@@ -413,47 +424,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
 		opts.ConstLabels,
 	)
 	return &SummaryVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
 			return newSummary(desc, opts, lvs...)
 		}),
 	}
 }

-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
-		return metric.(Summary), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }

-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
-		return metric.(Summary), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }

 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
-	return m.MetricVec.WithLabelValues(lvs...).(Summary)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
 }

 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
-	return m.MetricVec.With(labels).(Summary)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }

 type constSummary struct {
@@ -506,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
 //     map[float64]float64{0.5: 0.23, 0.99: 0.56}
 //
 // NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
 func NewConstSummary(
 	desc *Desc,
 	count uint64,
@@ -514,8 +594,11 @@ func NewConstSummary(
 	quantiles map[float64]float64,
 	labelValues ...string,
 ) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
-		return nil, errInconsistentCardinality
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constSummary{
 		desc: desc,
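Reviewer note on the SummaryVec changes above: WithLabelValues and With now return an Observer instead of a Summary, and CurryWith/MustCurryWith are new. A minimal sketch of calling code against this API; the metric name, labels, and values below are made up for illustration.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical latency metric, partitioned by handler and method.
	latency := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "example_request_duration_seconds",
			Help: "Request latency partitioned by handler and method.",
		},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(latency)

	// Curry the handler label once; only "method" remains variable.
	apiLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/api"})

	start := time.Now()
	// ... serve the request ...
	apiLatency.WithLabelValues("GET").Observe(time.Since(start).Seconds())
}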
vendor/github.com/prometheus/client_golang/prometheus/timer.go (29 lines changed, generated, vendored)
@@ -15,32 +15,6 @@ package prometheus

 import "time"

-// Observer is the interface that wraps the Observe method, which is used by
-// Histogram and Summary to add observations.
-type Observer interface {
-	Observe(float64)
-}
-
-// The ObserverFunc type is an adapter to allow the use of ordinary
-// functions as Observers. If f is a function with the appropriate
-// signature, ObserverFunc(f) is an Observer that calls f.
-//
-// This adapter is usually used in connection with the Timer type, and there are
-// two general use cases:
-//
-// The most common one is to use a Gauge as the Observer for a Timer.
-// See the "Gauge" Timer example.
-//
-// The more advanced use case is to create a function that dynamically decides
-// which Observer to use for observing the duration. See the "Complex" Timer
-// example.
-type ObserverFunc func(float64)
-
-// Observe calls f(value). It implements Observer.
-func (f ObserverFunc) Observe(value float64) {
-	f(value)
-}
-
 // Timer is a helper type to time functions. Use NewTimer to create new
 // instances.
 type Timer struct {
@@ -67,6 +41,9 @@ func NewTimer(o Observer) *Timer {
 // NewTimer. It calls the Observe method of the Observer provided during
 // construction with the duration in seconds as an argument. ObserveDuration is
 // usually called with a defer statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
 func (t *Timer) ObserveDuration() {
 	if t.observer != nil {
 		t.observer.Observe(time.Since(t.begin).Seconds())
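The ObserveDuration note is easiest to read next to the usual Timer pattern. A short sketch under assumed names (the histogram name is illustrative only):

package main

import "github.com/prometheus/client_golang/prometheus"

// requestDuration is an illustrative Histogram; any Observer works with Timer.
var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_handler_duration_seconds",
	Help:    "Time spent in the handler.",
	Buckets: prometheus.DefBuckets,
})

func handle() {
	// ObserveDuration is usually deferred; with Go 1.9+ the observed
	// duration is guaranteed to be non-negative.
	timer := prometheus.NewTimer(requestDuration)
	defer timer.ObserveDuration()
	// ... do the timed work ...
}

func main() {
	prometheus.MustRegister(requestDuration)
	handle()
}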
vendor/github.com/prometheus/client_golang/prometheus/untyped.go (107 lines changed, generated, vendored)
@@ -13,113 +13,12 @@

 package prometheus

-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that to
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-//
-// Deprecated: The Untyped type is deprecated because it doesn't make sense in
-// direct instrumentation. If you need to mirror an external metric of unknown
-// type (usually while writing exporters), Use MustNewConstMetric to create an
-// untyped metric instance on the fly.
-type Untyped interface {
-	Metric
-	Collector
-
-	// Set sets the Untyped metric to an arbitrary value.
-	Set(float64)
-	// Inc increments the Untyped metric by 1.
-	Inc()
-	// Dec decrements the Untyped metric by 1.
-	Dec()
-	// Add adds the given value to the Untyped metric. (The value can be
-	// negative, resulting in a decrease.)
-	Add(float64)
-	// Sub subtracts the given value from the Untyped metric. (The value can
-	// be negative, resulting in an increase.)
-	Sub(float64)
-}
-
 // UntypedOpts is an alias for Opts. See there for doc comments.
 type UntypedOpts Opts

-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped {
-	return newValue(NewDesc(
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		nil,
-		opts.ConstLabels,
-	), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
-	*MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
-	desc := NewDesc(
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		labelNames,
-		opts.ConstLabels,
-	)
-	return &UntypedVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			return newValue(desc, UntypedValue, 0, lvs...)
-		}),
-	}
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
-	if metric != nil {
-		return metric.(Untyped), err
-	}
-	return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
-	if metric != nil {
-		return metric.(Untyped), err
-	}
-	return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
-	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
-	return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
 //
 // To create UntypedFunc instances, use NewUntypedFunc.
 type UntypedFunc interface {
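With Untyped and UntypedVec removed, mirroring a value of unknown type is done either with UntypedFunc (kept above) or with MustNewConstMetric in a custom collector. A brief sketch using UntypedFunc; the metric name and the value source are invented for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

// externalReading stands in for a value scraped from some external system.
func externalReading() float64 { return 42 }

func main() {
	mirrored := prometheus.NewUntypedFunc(
		prometheus.UntypedOpts{
			Name: "example_mirrored_value",
			Help: "Value mirrored from an external system; type unknown.",
		},
		externalReading,
	)
	prometheus.MustRegister(mirrored)
}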
vendor/github.com/prometheus/client_golang/prometheus/value.go (99 lines changed, generated, vendored)
@@ -14,16 +14,12 @@
 package prometheus

 import (
-	"errors"
 	"fmt"
-	"math"
 	"sort"
-	"sync/atomic"
-	"time"
-
-	dto "github.com/prometheus/client_model/go"

 	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
 )

 // ValueType is an enumeration of metric types that represent a simple value.
@@ -37,81 +33,6 @@ const (
 	UntypedValue
 )

-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-// value is a generic metric for simple values. It implements Metric, Collector,
-// Counter, Gauge, and Untyped. Its effective type is determined by
-// ValueType. This is a low-level building block used by the library to back the
-// implementations of Counter, Gauge, and Untyped.
-type value struct {
-	// valBits contains the bits of the represented float64 value. It has
-	// to go first in the struct to guarantee alignment for atomic
-	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	valBits uint64
-
-	selfCollector
-
-	desc       *Desc
-	valType    ValueType
-	labelPairs []*dto.LabelPair
-}
-
-// newValue returns a newly allocated value with the given Desc, ValueType,
-// sample value and label values. It panics if the number of label
-// values is different from the number of variable labels in Desc.
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
-	if len(labelValues) != len(desc.variableLabels) {
-		panic(errInconsistentCardinality)
-	}
-	result := &value{
-		desc:       desc,
-		valType:    valueType,
-		valBits:    math.Float64bits(val),
-		labelPairs: makeLabelPairs(desc, labelValues),
-	}
-	result.init(result)
-	return result
-}
-
-func (v *value) Desc() *Desc {
-	return v.desc
-}
-
-func (v *value) Set(val float64) {
-	atomic.StoreUint64(&v.valBits, math.Float64bits(val))
-}
-
-func (v *value) SetToCurrentTime() {
-	v.Set(float64(time.Now().UnixNano()) / 1e9)
-}
-
-func (v *value) Inc() {
-	v.Add(1)
-}
-
-func (v *value) Dec() {
-	v.Add(-1)
-}
-
-func (v *value) Add(val float64) {
-	for {
-		oldBits := atomic.LoadUint64(&v.valBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
-		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
-			return
-		}
-	}
-}
-
-func (v *value) Sub(val float64) {
-	v.Add(val * -1)
-}
-
-func (v *value) Write(out *dto.Metric) error {
-	val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
-	return populateMetric(v.valType, val, v.labelPairs, out)
-}
-
 // valueFunc is a generic metric for simple values retrieved on collect time
 // from a function. It implements Metric and Collector. Its effective type is
 // determined by ValueType. This is a low-level building block used by the
@@ -156,10 +77,14 @@ func (v *valueFunc) Write(out *dto.Metric) error {
 // operations. However, when implementing custom Collectors, it is useful as a
 // throw-away metric that is generated on the fly to send it to Prometheus in
 // the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc.
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
 func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
-		return nil, errInconsistentCardinality
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constMetric{
 		desc: desc,
@@ -231,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
 			Value: proto.String(labelValues[i]),
 		})
 	}
-	for _, lp := range desc.constLabelPairs {
-		labelPairs = append(labelPairs, lp)
-	}
-	sort.Sort(LabelPairSorter(labelPairs))
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(labelPairSorter(labelPairs))
 	return labelPairs
 }
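The NewConstMetric change above mainly affects custom collectors, which build const metrics on every scrape; an invalid Desc now comes back as an error instead of only the label-count mismatch. A compact sketch of such a collector; the descriptor, label, and value are invented for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	depth *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depth
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// currentDepth stands in for however the real value is obtained.
	currentDepth := 7.0
	m, err := prometheus.NewConstMetric(c.depth, prometheus.GaugeValue, currentDepth, "default")
	if err != nil {
		return // an invalid Desc or a wrong label count surfaces here
	}
	ch <- m
}

func main() {
	c := &queueCollector{
		depth: prometheus.NewDesc(
			"example_queue_depth",
			"Current depth of the work queue.",
			[]string{"queue"}, nil,
		),
	}
	prometheus.MustRegister(c)
}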
vendor/github.com/prometheus/client_golang/prometheus/vec.go (516 lines changed, generated, vendored)
@@ -20,33 +20,180 @@ import (
 	"github.com/prometheus/common/model"
 )

-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
-	mtx      sync.RWMutex // Protects the children.
-	children map[uint64][]metricWithLabelValues
-	desc     *Desc
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses basicMetricVec internally.
+type metricVec struct {
+	*metricMap

-	newMetric   func(labelValues ...string) Metric
-	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
 	hashAddByte func(h uint64, b byte) uint64
 }

-// newMetricVec returns an initialized MetricVec. The concrete value is
-// returned for embedding into another struct.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
-	return &MetricVec{
-		children:    map[uint64][]metricWithLabelValues{},
-		desc:        desc,
-		newMetric:   newMetric,
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
 		hashAdd:     hashAdd,
 		hashAddByte: hashAddByte,
 	}
 }

+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", label)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+			}
+			newCurry = append(newCurry, curriedLabelValue{i, val})
+		}
+	}
+	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+	}
+
+	return &metricVec{
+		metricMap:   m.metricMap,
+		curry:       newCurry,
+		hashAdd:     m.hashAdd,
+		hashAddByte: m.hashAddByte,
+	}, nil
+}
+
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h             = hashNew()
+		curry         = m.curry
+		iVals, iCurry int
+	)
+	for i := 0; i < len(m.desc.variableLabels); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			h = m.hashAdd(h, vals[iVals])
+			iVals++
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h      = hashNew()
+		curry  = m.curry
+		iCurry int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if ok {
+				return 0, fmt.Errorf("label name %q is already curried", label)
+			}
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			if !ok {
+				return 0, fmt.Errorf("label name %q missing in label map", label)
+			}
+			h = m.hashAdd(h, val)
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
 // metricWithLabelValues provides the metric and its label values for
 // disambiguation on hash collision.
 type metricWithLabelValues struct {
@@ -54,166 +201,72 @@ type metricWithLabelValues struct {
 	metric Metric
 }

-// Describe implements Collector. The length of the returned slice
-// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+	index int
+	value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+	mtx       sync.RWMutex // Protects metrics.
+	metrics   map[uint64][]metricWithLabelValues
+	desc      *Desc
+	newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
 	ch <- m.desc
 }

 // Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
+func (m *metricMap) Collect(ch chan<- Metric) {
 	m.mtx.RLock()
 	defer m.mtx.RUnlock()

-	for _, metrics := range m.children {
+	for _, metrics := range m.metrics {
 		for _, metric := range metrics {
 			ch <- metric.metric
 		}
 	}
 }

-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it at its start value (e.g. a Summary or
-// Histogram without any observations). See also the SummaryVec example.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return nil, err
-	}
-
-	return m.getOrCreateMetricWithLabelValues(h, lvs), nil
-}
-
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc.
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return nil, err
-	}
-
-	return m.getOrCreateMetricWithLabels(h, labels), nil
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-//     httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
-	metric, err := m.GetMetricWithLabelValues(lvs...)
-	if err != nil {
-		panic(err)
-	}
-	return metric
-}
-
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-//     httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
-	metric, err := m.GetMetricWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return metric
-}
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual Metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()

-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return false
+	for h := range m.metrics {
+		delete(m.metrics, h)
 	}
-	return m.deleteByHashWithLabelValues(h, lvs)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in the Desc of the MetricVec. However, such
-// inconsistent Labels can never match an actual Metric, so the method will
-// always return false in that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return false
-	}
-
-	return m.deleteByHashWithLabels(h, labels)
 }

 // deleteByHashWithLabelValues removes the metric from the hash bucket h. If
 // there are multiple matches in the bucket, use lvs to select a metric and
 // remove only that metric.
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
-	metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
 	if !ok {
 		return false
 	}

-	i := m.findMetricWithLabelValues(metrics, lvs)
+	i := findMetricWithLabelValues(metrics, lvs, curry)
 	if i >= len(metrics) {
 		return false
 	}

 	if len(metrics) > 1 {
-		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
 	} else {
-		delete(m.children, h)
+		delete(m.metrics, h)
 	}
 	return true
 }
@@ -221,69 +274,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
 // deleteByHashWithLabels removes the metric from the hash bucket h. If there
 // are multiple matches in the bucket, use lvs to select a metric and remove
 // only that metric.
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
-	metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
 	if !ok {
 		return false
 	}
-	i := m.findMetricWithLabels(metrics, labels)
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
 	if i >= len(metrics) {
 		return false
 	}

 	if len(metrics) > 1 {
-		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
 	} else {
-		delete(m.children, h)
+		delete(m.metrics, h)
 	}
 	return true
 }

-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	for h := range m.children {
-		delete(m.children, h)
-	}
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
-	if len(vals) != len(m.desc.variableLabels) {
-		return 0, errInconsistentCardinality
-	}
-	h := hashNew()
-	for _, val := range vals {
-		h = m.hashAdd(h, val)
-		h = m.hashAddByte(h, model.SeparatorByte)
-	}
-	return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
-	if len(labels) != len(m.desc.variableLabels) {
-		return 0, errInconsistentCardinality
-	}
-	h := hashNew()
-	for _, label := range m.desc.variableLabels {
-		val, ok := labels[label]
-		if !ok {
-			return 0, fmt.Errorf("label name %q missing in label map", label)
-		}
-		h = m.hashAdd(h, val)
-		h = m.hashAddByte(h, model.SeparatorByte)
-	}
-	return h, nil
-}
-
 // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
 	m.mtx.RLock()
-	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
 	m.mtx.RUnlock()
 	if ok {
 		return metric
@@ -291,13 +313,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)

 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
 	if !ok {
-		// Copy to avoid allocation in case wo don't go down this code path.
-		copiedLVs := make([]string, len(lvs))
-		copy(copiedLVs, lvs)
-		metric = m.newMetric(copiedLVs...)
-		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
 	}
 	return metric
 }
@@ -306,9 +326,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
 	m.mtx.RLock()
-	metric, ok := m.getMetricWithLabels(hash, labels)
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
 	m.mtx.RUnlock()
 	if ok {
 		return metric
@@ -316,33 +338,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr

 	m.mtx.Lock()
 	defer m.mtx.Unlock()
-	metric, ok = m.getMetricWithLabels(hash, labels)
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
 	if !ok {
-		lvs := m.extractLabelValues(labels)
+		lvs := extractLabelValues(m.desc, labels, curry)
 		metric = m.newMetric(lvs...)
-		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
 	}
 	return metric
 }

-// getMetricWithLabelValues gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
-	metrics, ok := m.children[h]
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
 	if ok {
-		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
 			return metrics[i].metric, true
 		}
 	}
 	return nil, false
 }

-// getMetricWithLabels gets a metric while handling possible collisions in
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
 // the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
-	metrics, ok := m.children[h]
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
 	if ok {
-		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
 			return metrics[i].metric, true
 		}
 	}
@@ -351,9 +377,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)

 // findMetricWithLabelValues returns the index of the matching metric or
 // len(metrics) if not found.
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+func findMetricWithLabelValues(
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
 	for i, metric := range metrics {
-		if m.matchLabelValues(metric.values, lvs) {
+		if matchLabelValues(metric.values, lvs, curry) {
 			return i
 		}
 	}
@@ -362,32 +390,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l

 // findMetricWithLabels returns the index of the matching metric or len(metrics)
 // if not found.
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+func findMetricWithLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
 	for i, metric := range metrics {
-		if m.matchLabels(metric.values, labels) {
+		if matchLabels(desc, metric.values, labels, curry) {
 			return i
 		}
 	}
 	return len(metrics)
 }

-func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
-	if len(values) != len(lvs) {
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+	if len(values) != len(lvs)+len(curry) {
 		return false
 	}
+	var iLVs, iCurry int
 	for i, v := range values {
-		if v != lvs[i] {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if v != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if v != lvs[iLVs] {
 			return false
 		}
+		iLVs++
 	}
 	return true
 }

-func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
-	if len(labels) != len(values) {
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	if len(values) != len(labels)+len(curry) {
 		return false
 	}
-	for i, k := range m.desc.variableLabels {
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if values[i] != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
 		if values[i] != labels[k] {
 			return false
 		}
@@ -395,10 +442,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
 	return true
 }

-func (m *MetricVec) extractLabelValues(labels Labels) []string {
-	labelValues := make([]string, len(labels))
-	for i, k := range m.desc.variableLabels {
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(labels)+len(curry))
+	iCurry := 0
+	for i, k := range desc.variableLabels {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
 		labelValues[i] = labels[k]
 	}
 	return labelValues
 }
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(lvs)+len(curry))
+	var iCurry, iLVs int
+	for i := range labelValues {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = lvs[iLVs]
+		iLVs++
+	}
+	return labelValues
+}
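The exported vector types (CounterVec, GaugeVec, SummaryVec, HistogramVec) embed this metricVec, so the DeleteLabelValues, Delete, and lookup behavior documented above is what callers see. A short sketch with a made-up CounterVec:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests partitioned by code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	requests.WithLabelValues("200", "GET").Inc()
	requests.With(prometheus.Labels{"code": "404", "method": "POST"}).Inc()

	// Deleting a child removes it from the exposition until it is recreated.
	requests.DeleteLabelValues("200", "GET")
	requests.Delete(prometheus.Labels{"code": "404", "method": "POST"})
}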
vendor/github.com/prometheus/client_golang/prometheus/wrap.go (179 lines, generated, vendored, new file)
@@ -0,0 +1,179 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		labels:            labels,
+	}
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(labelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there was any. This will override any errer
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}