don't emit prometheus metrics that are ignored
parent 01ef7f1fc3
commit c225d06adf
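The change inverts the metric filter once, up front: cadvisor.go converts the ignore set built from the existing disable-metrics flag into a set of included metrics (`toIncludedMetrics` below), threads that set through every container factory and handler, and hands it to the Prometheus collector so that disabled metric families are never registered at all. A minimal, self-contained sketch of the idea (the type and helper here are simplified stand-ins for cadvisor's `container.MetricSet`, not the exact upstream code):

```go
package main

import "fmt"

// MetricKind names one family of container metrics, e.g. "tcp" or "disk".
type MetricKind string

// MetricSet is a set of metric kinds, mirroring cadvisor's container.MetricSet.
type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool {
	_, ok := ms[mk]
	return ok
}

// allMetrics lists every collectable kind (illustrative subset).
var allMetrics = []MetricKind{"cpu", "memory", "disk", "diskIO", "network", "tcp", "udp"}

// toIncludedMetrics inverts an ignore set into an include set,
// like the helper added to cadvisor.go in this commit.
func toIncludedMetrics(ignore MetricSet) MetricSet {
	included := MetricSet{}
	for _, m := range allMetrics {
		if !ignore.Has(m) {
			included[m] = struct{}{}
		}
	}
	return included
}

func main() {
	// Started with something like --disable_metrics=tcp,udp.
	ignored := MetricSet{"tcp": {}, "udp": {}}
	included := toIncludedMetrics(ignored)

	// Consumers now ask "is this kind included?" instead of negating an
	// ignore check, and collectors for excluded kinds are never registered.
	fmt.Println(included.Has("disk")) // true
	fmt.Println(included.Has("tcp"))  // false
}
```

With this in place, handlers and the Prometheus collector only do work for metric kinds that survive the conversion; everything else is skipped instead of being computed and then dropped.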
@@ -124,6 +124,8 @@ func main() {
 		os.Exit(0)
 	}

+	includedMetrics := toIncludedMetrics(ignoreMetrics.MetricSet)
+
 	setMaxProcs()

 	memoryStorage, err := NewMemoryStorage()
@@ -135,7 +137,7 @@ func main() {

 	collectorHttpClient := createCollectorHttpClient(*collectorCert, *collectorKey)

-	containerManager, err := manager.New(memoryStorage, sysFs, *maxHousekeepingInterval, *allowDynamicHousekeeping, ignoreMetrics.MetricSet, &collectorHttpClient, []string{"/"})
+	containerManager, err := manager.New(memoryStorage, sysFs, *maxHousekeepingInterval, *allowDynamicHousekeeping, includedMetrics, &collectorHttpClient, []string{"/"})
 	if err != nil {
 		glog.Fatalf("Failed to create a Container Manager: %s", err)
 	}
@@ -159,7 +161,7 @@ func main() {
 	if !*storeContainerLabels {
 		containerLabelFunc = metrics.BaseContainerLabels
 	}
-	cadvisorhttp.RegisterPrometheusHandler(mux, containerManager, *prometheusEndpoint, containerLabelFunc)
+	cadvisorhttp.RegisterPrometheusHandler(mux, containerManager, *prometheusEndpoint, containerLabelFunc, includedMetrics)

 	// Start the manager.
 	if err := containerManager.Start(); err != nil {
@@ -233,3 +235,27 @@ func createCollectorHttpClient(collectorCert, collectorKey string) http.Client {

 	return http.Client{Transport: transport}
 }
+
+func toIncludedMetrics(ignoreMetrics container.MetricSet) container.MetricSet {
+	set := container.MetricSet{}
+	allMetrics := []container.MetricKind{
+		container.CpuUsageMetrics,
+		container.ProcessSchedulerMetrics,
+		container.PerCpuUsageMetrics,
+		container.MemoryUsageMetrics,
+		container.CpuLoadMetrics,
+		container.DiskIOMetrics,
+		container.DiskUsageMetrics,
+		container.NetworkUsageMetrics,
+		container.NetworkTcpUsageMetrics,
+		container.NetworkUdpUsageMetrics,
+		container.AcceleratorUsageMetrics,
+		container.AppMetrics,
+	}
+	for _, metric := range allMetrics {
+		if !ignoreMetrics.Has(metric) {
+			set[metric] = struct{}{}
+		}
+	}
+	return set
+}

@@ -47,8 +47,8 @@ type containerdFactory struct {
 	// Information about the mounted cgroup subsystems.
 	cgroupSubsystems libcontainer.CgroupSubsystems
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 }

 func (self *containerdFactory) String() string {
@@ -70,7 +70,7 @@ func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace
 		&self.cgroupSubsystems,
 		inHostNamespace,
 		metadataEnvs,
-		self.ignoreMetrics,
+		self.includedMetrics,
 	)
 }

@@ -117,7 +117,7 @@ func (self *containerdFactory) DebugInfo() map[string][]string {
 }

 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to create containerd client: %v", err)
@@ -140,7 +140,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		fsInfo: fsInfo,
 		machineInfoFactory: factory,
 		version: containerdVersion,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 	}

 	container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})

@@ -62,7 +62,7 @@ func TestCanHandleAndAccept(t *testing.T) {
 		cgroupSubsystems: containerlibcontainer.CgroupSubsystems{},
 		fsInfo: nil,
 		machineInfoFactory: nil,
-		ignoreMetrics: nil,
+		includedMetrics: nil,
 	}
 	for k, v := range map[string]bool{
 		"/kubepods/besteffort/podd76e26fba3bf2bfd215eb29011d55250/40af7cdcbe507acad47a5a62025743ad3ddc6ab93b77b21363aa1c1d641047c9": true,

@@ -48,7 +48,7 @@ type containerdContainerHandler struct {
 	// Image name used for this container.
 	image string
 	// Filesystem handler.
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	libcontainerHandler *containerlibcontainer.Handler
 }
@@ -64,7 +64,7 @@ func newContainerdContainerHandler(
 	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
 	inHostNamespace bool,
 	metadataEnvs []string,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -127,7 +127,7 @@ func newContainerdContainerHandler(
 		Aliases: []string{id, name},
 	}

-	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), ignoreMetrics)
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), includedMetrics)

 	handler := &containerdContainerHandler{
 		machineInfoFactory: machineInfoFactory,
@@ -135,7 +135,7 @@ func newContainerdContainerHandler(
 		fsInfo: fsInfo,
 		envs: make(map[string]string),
 		labels: cntr.Labels,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		reference: containerReference,
 		libcontainerHandler: libcontainerHandler,
 	}
@@ -159,9 +159,9 @@ func (self *containerdContainerHandler) ContainerReference() (info.ContainerRefe

 func (self *containerdContainerHandler) needNet() bool {
 	// Since containerd does not handle networking ideally we need to return based
-	// on ignoreMetrics list. Here the assumption is the presence of cri-containerd
+	// on includedMetrics list. Here the assumption is the presence of cri-containerd
 	// label
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		//TODO change it to exported cri-containerd constants
 		return self.labels["io.cri-containerd.kind"] == "sandbox"
 	}
@@ -186,7 +186,7 @@ func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) e
 		return err
 	}

-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}
 	return nil

@@ -42,7 +42,7 @@ func TestHandler(t *testing.T) {
 		cgroupSubsystems *containerlibcontainer.CgroupSubsystems
 		inHostNamespace bool
 		metadataEnvs []string
-		ignoreMetrics container.MetricSet
+		includedMetrics container.MetricSet
 		storageDir string

 		hasErr bool
@@ -92,7 +92,7 @@ func TestHandler(t *testing.T) {
 			},
 		},
 	} {
-		handler, err := newContainerdContainerHandler(ts.client, ts.name, ts.machineInfoFactory, ts.fsInfo, ts.cgroupSubsystems, ts.inHostNamespace, ts.metadataEnvs, ts.ignoreMetrics)
+		handler, err := newContainerdContainerHandler(ts.client, ts.name, ts.machineInfoFactory, ts.fsInfo, ts.cgroupSubsystems, ts.inHostNamespace, ts.metadataEnvs, ts.includedMetrics)
 		if ts.hasErr {
 			as.NotNil(err)
 			if ts.errContains != "" {

@@ -55,7 +55,7 @@ type crioFactory struct {
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	client crioClient
 }
@@ -81,7 +81,7 @@ func (self *crioFactory) NewContainerHandler(name string, inHostNamespace bool)
 		&self.cgroupSubsystems,
 		inHostNamespace,
 		metadataEnvs,
-		self.ignoreMetrics,
+		self.includedMetrics,
 	)
 	return
 }
@@ -136,7 +136,7 @@ var (
 )

 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return err
@@ -162,7 +162,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		machineInfoFactory: factory,
 		storageDriver: storageDriver(info.StorageDriver),
 		storageDir: info.StorageRoot,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 	}

 	container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})

@@ -30,7 +30,7 @@ func TestCanHandleAndAccept(t *testing.T) {
 		machineInfoFactory: nil,
 		storageDriver: "",
 		storageDir: "",
-		ignoreMetrics: nil,
+		includedMetrics: nil,
 	}
 	for k, v := range map[string]bool{
 		"/kubepods/pod068e8fa0-9213-11e7-a01f-507b9d4141fa/crio-81e5c2990803c383229c9680ce964738d5e566d97f5bd436ac34808d2ec75d5f": true,

@@ -63,7 +63,7 @@ type crioContainerHandler struct {
 	// The IP address of the container
 	ipAddress string

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	reference info.ContainerReference

@@ -83,7 +83,7 @@ func newCrioContainerHandler(
 	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
 	inHostNamespace bool,
 	metadataEnvs []string,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -141,7 +141,7 @@ func newCrioContainerHandler(
 		Namespace: CrioNamespace,
 	}

-	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, ignoreMetrics)
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, includedMetrics)

 	// TODO: extract object mother method
 	handler := &crioContainerHandler{
@@ -152,7 +152,7 @@ func newCrioContainerHandler(
 		rootfsStorageDir: rootfsStorageDir,
 		envs: make(map[string]string),
 		labels: cInfo.Labels,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		reference: containerReference,
 		libcontainerHandler: libcontainerHandler,
 	}
@@ -171,7 +171,7 @@ func newCrioContainerHandler(
 	handler.ipAddress = cInfo.IP

 	// we optionally collect disk usage metrics
-	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if includedMetrics.Has(container.DiskUsageMetrics) {
 		handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, storageLogDir, fsInfo)
 	}
 	// TODO for env vars we wanted to show from container.Config.Env from whitelist
@@ -199,14 +199,14 @@ func (self *crioContainerHandler) ContainerReference() (info.ContainerReference,
 }

 func (self *crioContainerHandler) needNet() bool {
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		return self.labels["io.kubernetes.container.name"] == "POD"
 	}
 	return false
 }

 func (self *crioContainerHandler) GetSpec() (info.ContainerSpec, error) {
-	hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics)
+	hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)

 	spec.Labels = self.labels
@@ -222,11 +222,11 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
 		return err
 	}

-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}

-	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if !self.includedMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
 	var device string

@@ -37,7 +37,7 @@ func TestHandler(t *testing.T) {
 		cgroupSubsystems *containerlibcontainer.CgroupSubsystems
 		inHostNamespace bool
 		metadataEnvs []string
-		ignoreMetrics container.MetricSet
+		includedMetrics container.MetricSet

 		hasErr bool
 		errContains string
@@ -102,7 +102,7 @@ func TestHandler(t *testing.T) {
 			},
 		},
 	} {
-		handler, err := newCrioContainerHandler(ts.client, ts.name, ts.machineInfoFactory, ts.fsInfo, ts.storageDriver, ts.storageDir, ts.cgroupSubsystems, ts.inHostNamespace, ts.metadataEnvs, ts.ignoreMetrics)
+		handler, err := newCrioContainerHandler(ts.client, ts.name, ts.machineInfoFactory, ts.fsInfo, ts.storageDriver, ts.storageDir, ts.cgroupSubsystems, ts.inHostNamespace, ts.metadataEnvs, ts.includedMetrics)
 		if ts.hasErr {
 			as.NotNil(err)
 			if ts.errContains != "" {

@@ -110,7 +110,7 @@ type dockerFactory struct {

 	dockerAPIVersion []int

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	thinPoolName string
 	thinPoolWatcher *devicemapper.ThinPoolWatcher
@@ -141,7 +141,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
 		inHostNamespace,
 		metadataEnvs,
 		self.dockerVersion,
-		self.ignoreMetrics,
+		self.includedMetrics,
 		self.thinPoolName,
 		self.thinPoolWatcher,
 		self.zfsWatcher,
@@ -309,7 +309,7 @@ func ensureThinLsKernelVersion(kernelVersion string) error {
 }

 // Register root container before running this function!
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	client, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to communicate with docker daemon: %v", err)
@@ -363,7 +363,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		machineInfoFactory: factory,
 		storageDriver: storageDriver(dockerInfo.Driver),
 		storageDir: RootDir(),
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		thinPoolName: thinPoolName,
 		thinPoolWatcher: thinPoolWatcher,
 		zfsWatcher: zfsWatcher,

@@ -83,7 +83,7 @@ type dockerContainerHandler struct {
 	// The IP address of the container
 	ipAddress string

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	// the devicemapper poolname
 	poolName string
@@ -128,7 +128,7 @@ func newDockerContainerHandler(
 	inHostNamespace bool,
 	metadataEnvs []string,
 	dockerVersion []int,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 	thinPoolName string,
 	thinPoolWatcher *devicemapper.ThinPoolWatcher,
 	zfsWatcher *zfs.ZfsWatcher,
@@ -203,7 +203,7 @@ func newDockerContainerHandler(
 		rootfsStorageDir: rootfsStorageDir,
 		envs: make(map[string]string),
 		labels: ctnr.Config.Labels,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		zfsParent: zfsParent,
 	}
 	// Timestamp returned by Docker is in time.RFC3339Nano format.
@@ -212,7 +212,7 @@ func newDockerContainerHandler(
 		// This should not happen, report the error just in case
 		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
 	}
-	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, ignoreMetrics)
+	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, includedMetrics)

 	// Add the name and bare ID as aliases of the container.
 	handler.reference = info.ContainerReference{
@@ -244,7 +244,7 @@ func newDockerContainerHandler(

 	handler.ipAddress = ipAddress

-	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if includedMetrics.Has(container.DiskUsageMetrics) {
 		handler.fsHandler = &dockerFsHandler{
 			fsHandler: common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo),
 			thinPoolWatcher: thinPoolWatcher,
@@ -345,14 +345,14 @@ func (self *dockerContainerHandler) ContainerReference() (info.ContainerReferenc
 }

 func (self *dockerContainerHandler) needNet() bool {
-	if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if self.includedMetrics.Has(container.NetworkUsageMetrics) {
 		return !self.networkMode.IsContainer()
 	}
 	return false
 }

 func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
-	hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics)
+	hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics)
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)

 	spec.Labels = self.labels
@@ -369,11 +369,11 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}

-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}

-	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if !self.includedMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
 	var device string

@@ -51,6 +51,7 @@ const (
 	NetworkUsageMetrics MetricKind = "network"
 	NetworkTcpUsageMetrics MetricKind = "tcp"
 	NetworkUdpUsageMetrics MetricKind = "udp"
+	AcceleratorUsageMetrics MetricKind = "accelerator"
 	AppMetrics MetricKind = "app"
 )

@@ -43,16 +43,16 @@ type Handler struct {
 	cgroupManager cgroups.Manager
 	rootFs string
 	pid int
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 	pidMetricsCache map[int]*info.CpuSchedstat
 }

-func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
+func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler {
 	return &Handler{
 		cgroupManager: cgroupManager,
 		rootFs: rootFs,
 		pid: pid,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		pidMetricsCache: make(map[int]*info.CpuSchedstat),
 	}
 }
@@ -66,10 +66,10 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	libcontainerStats := &libcontainer.Stats{
 		CgroupStats: cgroupStats,
 	}
-	withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics)
+	withPerCPU := h.includedMetrics.Has(container.PerCpuUsageMetrics)
 	stats := newContainerStats(libcontainerStats, withPerCPU)

-	if !h.ignoreMetrics.Has(container.ProcessSchedulerMetrics) {
+	if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
 		pids, err := h.cgroupManager.GetAllPids()
 		if err != nil {
 			glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
@@ -85,7 +85,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 	if h.pid == 0 {
 		return stats, nil
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUsageMetrics) {
 		netStats, err := networkStatsFromProc(h.rootFs, h.pid)
 		if err != nil {
 			glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
@@ -93,7 +93,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
 		t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
@@ -108,7 +108,7 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			stats.Network.Tcp6 = t6
 		}
 	}
-	if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
+	if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
 		u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
 		if err != nil {
 			glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)

@@ -50,7 +50,7 @@ type mesosFactory struct {
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo

-	ignoreMetrics map[container.MetricKind]struct{}
+	includedMetrics map[container.MetricKind]struct{}

 	client mesosAgentClient
 }
@@ -70,7 +70,7 @@ func (self *mesosFactory) NewContainerHandler(name string, inHostNamespace bool)
 		&self.cgroupSubsystems,
 		self.machineInfoFactory,
 		self.fsInfo,
-		self.ignoreMetrics,
+		self.includedMetrics,
 		inHostNamespace,
 		client,
 	)
@@ -127,7 +127,7 @@ func (self *mesosFactory) DebugInfo() map[string][]string {
 func Register(
 	machineInfoFactory info.MachineInfoFactory,
 	fsInfo fs.FsInfo,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 ) error {
 	client, err := Client()

@@ -145,7 +145,7 @@ func Register(
 		machineInfoFactory: machineInfoFactory,
 		cgroupSubsystems: cgroupSubsystems,
 		fsInfo: fsInfo,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		client: client,
 	}
 	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})

@@ -59,7 +59,7 @@ func TestCanHandleAndAccept(t *testing.T) {
 		machineInfoFactory: nil,
 		cgroupSubsystems: containerlibcontainer.CgroupSubsystems{},
 		fsInfo: nil,
-		ignoreMetrics: nil,
+		includedMetrics: nil,
 		client: fakeMesosAgentClient(testContainers, nil),
 	}
 	tests := []struct {

@@ -43,8 +43,8 @@ type mesosContainerHandler struct {
 	// File System Info
 	fsInfo fs.FsInfo

-	// Metrics to be ignored.
-	ignoreMetrics container.MetricSet
+	// Metrics to be included.
+	includedMetrics container.MetricSet

 	labels map[string]string

@@ -63,7 +63,7 @@ func newMesosContainerHandler(
 	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
 	machineInfoFactory info.MachineInfoFactory,
 	fsInfo fs.FsInfo,
-	ignoreMetrics container.MetricSet,
+	includedMetrics container.MetricSet,
 	inHostNamespace bool,
 	client mesosAgentClient,
 ) (container.ContainerHandler, error) {
@@ -96,7 +96,7 @@ func newMesosContainerHandler(
 	labels := cinfo.labels
 	pid := int(*cinfo.cntr.ContainerStatus.ExecutorPID)

-	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

 	reference := info.ContainerReference{
 		Id: id,
@@ -110,7 +110,7 @@ func newMesosContainerHandler(
 		machineInfoFactory: machineInfoFactory,
 		cgroupPaths: cgroupPaths,
 		fsInfo: fsInfo,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		labels: labels,
 		reference: reference,
 		libcontainerHandler: libcontainerHandler,
@@ -154,7 +154,7 @@ func (self *mesosContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}

-	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if self.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}

@@ -43,7 +43,7 @@ func TestContainerReference(t *testing.T) {
 		fsInfo fs.FsInfo
 		cgroupSubsystems *containerlibcontainer.CgroupSubsystems
 		inHostNamespace bool
-		ignoreMetrics container.MetricSet
+		includedMetrics container.MetricSet

 		hasErr bool
 		errContains string
@@ -95,7 +95,7 @@ func TestContainerReference(t *testing.T) {
 			},
 		},
 	} {
-		handler, err := newMesosContainerHandler(ts.name, ts.cgroupSubsystems, ts.machineInfoFactory, ts.fsInfo, ts.ignoreMetrics, ts.inHostNamespace, ts.client)
+		handler, err := newMesosContainerHandler(ts.name, ts.cgroupSubsystems, ts.machineInfoFactory, ts.fsInfo, ts.includedMetrics, ts.inHostNamespace, ts.client)
 		if ts.hasErr {
 			as.NotNil(err)
 			if ts.errContains != "" {

@@ -44,8 +44,8 @@ type rawFactory struct {
 	// Watcher for inotify events.
 	watcher *common.InotifyWatcher

-	// List of metrics to be ignored.
-	ignoreMetrics map[container.MetricKind]struct{}
+	// List of metrics to be included.
+	includedMetrics map[container.MetricKind]struct{}

 	// List of raw container cgroup path prefix whitelist.
 	rawPrefixWhiteList []string
@@ -60,7 +60,7 @@ func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (
 	if !inHostNamespace {
 		rootFs = "/rootfs"
 	}
-	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.ignoreMetrics)
+	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.includedMetrics)
 }

 // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored.
@@ -80,7 +80,7 @@ func (self *rawFactory) DebugInfo() map[string][]string {
 	return common.DebugInfo(self.watcher.GetWatches())
 }

-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error {
+func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error {
 	cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
 	if err != nil {
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
@@ -100,7 +100,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		fsInfo: fsInfo,
 		cgroupSubsystems: &cgroupSubsystems,
 		watcher: watcher,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		rawPrefixWhiteList: rawPrefixWhiteList,
 	}
 	container.RegisterContainerHandlerFactory(factory, []watch.ContainerWatchSource{watch.Raw})

@@ -49,7 +49,7 @@ func isRootCgroup(name string) bool {
 	return name == "/"
 }

-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
 	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)

 	cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
@@ -78,7 +78,7 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
 		pid = 1
 	}

-	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
+	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

 	return &rawContainerHandler{
 		name: name,

@@ -35,7 +35,7 @@ type rktFactory struct {

 	fsInfo fs.FsInfo

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	rktPath string
 }
@@ -54,7 +54,7 @@ func (self *rktFactory) NewContainerHandler(name string, inHostNamespace bool) (
 	if !inHostNamespace {
 		rootFs = "/rootfs"
 	}
-	return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.ignoreMetrics)
+	return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.includedMetrics)
 }

 func (self *rktFactory) CanHandleAndAccept(name string) (bool, bool, error) {
@@ -67,7 +67,7 @@ func (self *rktFactory) DebugInfo() map[string][]string {
 	return map[string][]string{}
 }

-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	_, err := Client()
 	if err != nil {
 		return fmt.Errorf("unable to communicate with Rkt api service: %v", err)
@@ -91,7 +91,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno
 		machineInfoFactory: machineInfoFactory,
 		fsInfo: fsInfo,
 		cgroupSubsystems: &cgroupSubsystems,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		rktPath: rktPath,
 	}
 	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Rkt})

@@ -48,7 +48,7 @@ type rktContainerHandler struct {
 	// Filesystem handler.
 	fsHandler common.FsHandler

-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet

 	apiPod *rktapi.Pod

@@ -59,7 +59,7 @@ type rktContainerHandler struct {
 	libcontainerHandler *libcontainer.Handler
 }

-func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
+func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
 	aliases := make([]string, 1)
 	isPod := false

@@ -109,7 +109,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
 		Paths: cgroupPaths,
 	}

-	libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
+	libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

 	rootfsStorageDir := getRootFs(rktPath, parsed)

@@ -125,14 +125,14 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
 		fsInfo: fsInfo,
 		isPod: isPod,
 		rootfsStorageDir: rootfsStorageDir,
-		ignoreMetrics: ignoreMetrics,
+		includedMetrics: includedMetrics,
 		apiPod: apiPod,
 		labels: labels,
 		reference: containerReference,
 		libcontainerHandler: libcontainerHandler,
 	}

-	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if includedMetrics.Has(container.DiskUsageMetrics) {
 		handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, "", fsInfo)
 	}

@@ -170,8 +170,8 @@ func (handler *rktContainerHandler) Cleanup() {
 }

 func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
-	hasNetwork := handler.isPod && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
-	hasFilesystem := !handler.ignoreMetrics.Has(container.DiskUsageMetrics)
+	hasNetwork := handler.isPod && handler.includedMetrics.Has(container.NetworkUsageMetrics)
+	hasFilesystem := handler.includedMetrics.Has(container.DiskUsageMetrics)

 	spec, err := common.GetSpec(handler.cgroupPaths, handler.machineInfoFactory, hasNetwork, hasFilesystem)

@@ -186,11 +186,11 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}

-	if !handler.ignoreMetrics.Has(container.DiskIOMetrics) {
+	if handler.includedMetrics.Has(container.DiskIOMetrics) {
 		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
 	}

-	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
+	if !handler.includedMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}

@@ -50,7 +50,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string {
 }

 // Register registers the systemd container factory.
-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
+func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
 	glog.V(1).Infof("Registering systemd factory")
 	factory := &systemdFactory{}
 	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})

@@ -20,6 +20,7 @@ import (
 	"os"

 	"github.com/google/cadvisor/api"
+	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/healthz"
 	httpmux "github.com/google/cadvisor/http/mux"
 	"github.com/google/cadvisor/manager"
@@ -93,10 +94,11 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut

 // RegisterPrometheusHandler creates a new PrometheusCollector and configures
 // the provided HTTP mux to handle the given Prometheus endpoint.
-func RegisterPrometheusHandler(mux httpmux.Mux, containerManager manager.Manager, prometheusEndpoint string, f metrics.ContainerLabelsFunc) {
+func RegisterPrometheusHandler(mux httpmux.Mux, containerManager manager.Manager, prometheusEndpoint string,
+	f metrics.ContainerLabelsFunc, includedMetrics container.MetricSet) {
 	r := prometheus.NewRegistry()
 	r.MustRegister(
-		metrics.NewPrometheusCollector(containerManager, f),
+		metrics.NewPrometheusCollector(containerManager, f, includedMetrics),
 		prometheus.NewGoCollector(),
 		prometheus.NewProcessCollector(os.Getpid(), ""),
 	)

@@ -142,7 +142,7 @@ type Manager interface {
 }

 // New takes a memory storage and returns a new manager.
-func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string) (Manager, error) {
+func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, includedMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string) (Manager, error) {
 	if memoryCache == nil {
 		return nil, fmt.Errorf("manager requires memory storage")
 	}
@@ -213,7 +213,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
 		startupTime: time.Now(),
 		maxHousekeepingInterval: maxHousekeepingInterval,
 		allowDynamicHousekeeping: allowDynamicHousekeeping,
-		ignoreMetrics: ignoreMetricsSet,
+		includedMetrics: includedMetricsSet,
 		containerWatchers: []watcher.ContainerWatcher{},
 		eventsChannel: eventsChannel,
 		collectorHttpClient: collectorHttpClient,
@@ -285,7 +285,7 @@ type manager struct {
 	startupTime time.Time
 	maxHousekeepingInterval time.Duration
 	allowDynamicHousekeeping bool
-	ignoreMetrics container.MetricSet
+	includedMetrics container.MetricSet
 	containerWatchers []watcher.ContainerWatcher
 	eventsChannel chan watcher.ContainerEvent
 	collectorHttpClient *http.Client
@@ -296,12 +296,12 @@ type manager struct {

 // Start the container manager.
 func (self *manager) Start() error {
-	err := docker.Register(self, self.fsInfo, self.ignoreMetrics)
+	err := docker.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err)
 	}

-	err = rkt.Register(self, self.fsInfo, self.ignoreMetrics)
+	err = rkt.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the rkt container factory failed: %v", err)
 	} else {
@@ -312,27 +312,27 @@ func (self *manager) Start() error {
 		self.containerWatchers = append(self.containerWatchers, watcher)
 	}

-	err = containerd.Register(self, self.fsInfo, self.ignoreMetrics)
+	err = containerd.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the containerd container factory failed: %v", err)
 	}

-	err = crio.Register(self, self.fsInfo, self.ignoreMetrics)
+	err = crio.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the crio container factory failed: %v", err)
 	}

-	err = mesos.Register(self, self.fsInfo, self.ignoreMetrics)
+	err = mesos.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the mesos container factory failed: %v", err)
 	}

-	err = systemd.Register(self, self.fsInfo, self.ignoreMetrics)
+	err = systemd.Register(self, self.fsInfo, self.includedMetrics)
 	if err != nil {
 		glog.V(5).Infof("Registration of the systemd container factory failed: %v", err)
 	}

-	err = raw.Register(self, self.fsInfo, self.ignoreMetrics, self.rawContainerCgroupPathPrefixWhiteList)
+	err = raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList)
 	if err != nil {
 		glog.Errorf("Registration of the raw container factory failed: %v", err)
 	}

@@ -19,6 +19,7 @@ import (
 	"regexp"
 	"time"

+	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"

 	"github.com/golang/glog"
@@ -114,7 +115,7 @@ type PrometheusCollector struct {
 // ContainerLabelsFunc specifies which base labels will be attached to all
 // exported metrics. If left to nil, the DefaultContainerLabels function
 // will be used instead.
-func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCollector {
+func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetrics container.MetricSet) *PrometheusCollector {
 	if f == nil {
 		f = DefaultContainerLabels
 	}
@@ -134,7 +135,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{{value: float64(time.Now().Unix())}}
 			},
-		}, {
+		},
+		},
+	}
+	if includedMetrics.Has(container.CpuUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_cpu_user_seconds_total",
 			help: "Cumulative user cpu time consumed in seconds.",
 			valueType: prometheus.CounterValue,
@@ -197,7 +203,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{{value: float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second)}}
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.ProcessSchedulerMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_cpu_schedstat_run_seconds_total",
 			help: "Time duration the processes of the container have run on the CPU.",
 			valueType: prometheus.CounterValue,
@@ -218,7 +229,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 			getValues: func(s *info.ContainerStats) metricValues {
 				return metricValues{{value: float64(s.Cpu.Schedstat.RunPeriods)}}
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.CpuLoadMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_cpu_load_average_10s",
 			help: "Value of container cpu load average over the last 10 seconds.",
 			valueType: prometheus.GaugeValue,
@@ -226,6 +242,40 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				return metricValues{{value: float64(s.Cpu.LoadAverage)}}
 			},
 		}, {
+			name: "container_tasks_state",
+			help: "Number of tasks in given state",
+			extraLabels: []string{"state"},
+			valueType: prometheus.GaugeValue,
+			getValues: func(s *info.ContainerStats) metricValues {
+				return metricValues{
+					{
+						value: float64(s.TaskStats.NrSleeping),
+						labels: []string{"sleeping"},
+					},
+					{
+						value: float64(s.TaskStats.NrRunning),
+						labels: []string{"running"},
+					},
+					{
+						value: float64(s.TaskStats.NrStopped),
+						labels: []string{"stopped"},
+					},
+					{
+						value: float64(s.TaskStats.NrUninterruptible),
+						labels: []string{"uninterruptible"},
+					},
+					{
+						value: float64(s.TaskStats.NrIoWait),
+						labels: []string{"iowaiting"},
+					},
+				}
+			},
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.MemoryUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_memory_cache",
 			help: "Number of bytes of page cache memory.",
 			valueType: prometheus.GaugeValue,
@@ -300,7 +350,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				},
 				}
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.AcceleratorUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_accelerator_memory_total_bytes",
 			help: "Total accelerator memory.",
 			valueType: prometheus.GaugeValue,
@@ -345,7 +400,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				}
 				return values
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.DiskUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_fs_inodes_free",
 			help: "Number of available Inodes",
 			valueType: prometheus.GaugeValue,
@@ -385,7 +445,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				return float64(fs.Usage)
 				})
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.DiskIOMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_fs_reads_bytes_total",
 			help: "Cumulative count of bytes read",
 			valueType: prometheus.CounterValue,
@@ -547,7 +612,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				return float64(fs.WeightedIoTime) / float64(time.Second)
 				})
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.NetworkUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_network_receive_bytes_total",
 			help: "Cumulative count of bytes received",
 			valueType: prometheus.CounterValue,
@@ -667,7 +737,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				}
 				return values
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.NetworkTcpUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_network_tcp_usage_total",
 			help: "tcp connection usage statistic for container",
 			valueType: prometheus.GaugeValue,
@@ -720,7 +795,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				},
 				}
 			},
-		}, {
+		},
+	}...)
+	}
+	if includedMetrics.Has(container.NetworkTcpUsageMetrics) {
+		c.containerMetrics = append(c.containerMetrics, []containerMetric{
+			{
 			name: "container_network_udp_usage_total",
 			help: "udp connection usage statistic for container",
 			valueType: prometheus.GaugeValue,
@@ -745,37 +825,8 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
 				},
 				}
 			},
-		}, {
-			name: "container_tasks_state",
-			help: "Number of tasks in given state",
-			extraLabels: []string{"state"},
-			valueType: prometheus.GaugeValue,
-			getValues: func(s *info.ContainerStats) metricValues {
-				return metricValues{
-					{
-						value: float64(s.TaskStats.NrSleeping),
-						labels: []string{"sleeping"},
-					},
-					{
-						value: float64(s.TaskStats.NrRunning),
-						labels: []string{"running"},
-					},
-					{
-						value: float64(s.TaskStats.NrStopped),
-						labels: []string{"stopped"},
-					},
-					{
-						value: float64(s.TaskStats.NrUninterruptible),
-						labels: []string{"uninterruptible"},
-					},
-					{
-						value: float64(s.TaskStats.NrIoWait),
-						labels: []string{"iowaiting"},
-					},
-				}
-			},
-		},
+		},
+	}...)
+	}

 	return c

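Since NewPrometheusCollector now takes the metric set explicitly, an embedder that only wants a subset of the container metric families can say so at registration time. A rough usage sketch based on the signatures in the hunks above (the import paths and the surrounding wiring are assumptions; error handling and manager construction are omitted):

```go
import (
	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/manager"
	"github.com/google/cadvisor/metrics"

	"github.com/prometheus/client_golang/prometheus"
)

// registerSubsetCollector registers container metrics for only the given kinds.
// containerManager is an already-constructed manager.Manager (see manager.New above).
func registerSubsetCollector(containerManager manager.Manager) *prometheus.Registry {
	// Only the CPU and memory families will be registered; everything else is skipped.
	included := container.MetricSet{
		container.CpuUsageMetrics:    struct{}{},
		container.MemoryUsageMetrics: struct{}{},
	}
	r := prometheus.NewRegistry()
	// A nil labels function falls back to DefaultContainerLabels, per the
	// constructor comment in the hunk above.
	r.MustRegister(metrics.NewPrometheusCollector(containerManager, nil, included))
	return r
}
```
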
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"

+	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"

 	"github.com/prometheus/client_golang/prometheus"
@@ -48,6 +49,20 @@ func (p testSubcontainersInfoProvider) GetMachineInfo() (*info.MachineInfo, erro
 	}, nil
 }

+var allMetrics = container.MetricSet{
+	container.CpuUsageMetrics: struct{}{},
+	container.ProcessSchedulerMetrics: struct{}{},
+	container.PerCpuUsageMetrics: struct{}{},
+	container.MemoryUsageMetrics: struct{}{},
+	container.CpuLoadMetrics: struct{}{},
+	container.DiskIOMetrics: struct{}{},
+	container.AcceleratorUsageMetrics: struct{}{},
+	container.DiskUsageMetrics: struct{}{},
+	container.NetworkUsageMetrics: struct{}{},
+	container.NetworkTcpUsageMetrics: struct{}{},
+	container.NetworkUdpUsageMetrics: struct{}{},
+}
+
 func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
 	return []*info.ContainerInfo{
 		{
@@ -237,7 +252,7 @@ func TestPrometheusCollector(t *testing.T) {
 		s := DefaultContainerLabels(container)
 		s["zone.name"] = "hello"
 		return s
-	})
+	}, allMetrics)
 	prometheus.MustRegister(c)
 	defer prometheus.Unregister(c)

@@ -307,7 +322,7 @@ func TestPrometheusCollector_scrapeFailure(t *testing.T) {
 		s := DefaultContainerLabels(container)
 		s["zone.name"] = "hello"
 		return s
-	})
+	}, allMetrics)
 	prometheus.MustRegister(c)
 	defer prometheus.Unregister(c)
