parent bc14de84cc
commit 2ccad4b42f
@@ -116,6 +116,15 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 		}
 	}
 
+	// Processes, read its value from the pids path directly
+	pidsRoot, ok := cgroupPaths["pids"]
+	if ok {
+		if utils.FileExists(pidsRoot) {
+			spec.HasProcesses = true
+			spec.Processes.Limit = readUInt64(pidsRoot, "pids.max")
+		}
+	}
+
 	spec.HasNetwork = hasNetwork
 	spec.HasFilesystem = hasFilesystem
 
@@ -143,7 +152,7 @@ func readString(dirpath string, file string) string {
 
 func readUInt64(dirpath string, file string) uint64 {
 	out := readString(dirpath, file)
-	if out == "" {
+	if out == "" || out == "max" {
 		return 0
 	}
 
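Note: for an unlimited cgroup the kernel reports the literal string "max" in pids.max, so readUInt64 now maps both an empty read and "max" to 0; the Prometheus help text added further down documents 0 as "infinity". A minimal standalone sketch of that parsing rule (parsePidsMax is an illustrative helper, not part of this change):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePidsMax mirrors the rule above: empty or "max" (no limit) yields 0,
// otherwise the decimal value read from pids.max is returned.
func parsePidsMax(raw string) uint64 {
	out := strings.TrimSpace(raw)
	if out == "" || out == "max" {
		return 0
	}
	val, err := strconv.ParseUint(out, 10, 64)
	if err != nil {
		return 0
	}
	return val
}

func main() {
	fmt.Println(parsePidsMax("max\n")) // 0, i.e. unlimited
	fmt.Println(parsePidsMax("100"))   // 100
}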
@@ -130,6 +130,9 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
 			klog.V(4).Infof("Unable to get Process Stats: %v", err)
 		}
 	}
+
+	// If processes metrics are included, just set the threads metrics when they exist; they have no relationship with the cpu path.
+	setThreadsStats(cgroupStats, stats)
 }
 
 // For backwards compatibility.
@@ -588,6 +591,15 @@ func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerS
 	}
 }
 
+// read from the pids path, not cpu
+func setThreadsStats(s *cgroups.Stats, ret *info.ContainerStats) {
+	if s != nil {
+		ret.Processes.ThreadsCurrent = s.PidsStats.Current
+		ret.Processes.ThreadsMax = s.PidsStats.Limit
+	}
+
+}
+
 func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics container.MetricSet) *info.ContainerStats {
 	ret := &info.ContainerStats{
 		Timestamp: time.Now(),
@@ -134,3 +134,44 @@ func TestMorePossibleCPUs(t *testing.T) {
 		t.Fatalf("expected %+v == %+v", ret, expected)
 	}
 }
+
+func TestSetProcessesStats(t *testing.T) {
+	ret := info.ContainerStats{
+		Processes: info.ProcessStats{
+			ProcessCount: 1,
+			FdCount:      2,
+		},
+	}
+	s := &cgroups.Stats{
+		PidsStats: cgroups.PidsStats{
+			Current: 5,
+			Limit:   100,
+		},
+	}
+	setThreadsStats(s, &ret)
+
+	expected := info.ContainerStats{
+
+		Processes: info.ProcessStats{
+			ProcessCount:   1,
+			FdCount:        2,
+			ThreadsCurrent: s.PidsStats.Current,
+			ThreadsMax:     s.PidsStats.Limit,
+		},
+	}
+
+	if expected.Processes.ProcessCount != ret.Processes.ProcessCount {
+		t.Fatalf("expected ProcessCount: %d == %d", ret.Processes.ProcessCount, expected.Processes.ProcessCount)
+	}
+	if expected.Processes.FdCount != ret.Processes.FdCount {
+		t.Fatalf("expected FdCount: %d == %d", ret.Processes.FdCount, expected.Processes.FdCount)
+	}
+
+	if expected.Processes.ThreadsCurrent != ret.Processes.ThreadsCurrent {
+		t.Fatalf("expected current threads: %d == %d", ret.Processes.ThreadsCurrent, expected.Processes.ThreadsCurrent)
+	}
+	if expected.Processes.ThreadsMax != ret.Processes.ThreadsMax {
+		t.Fatalf("expected max threads: %d == %d", ret.Processes.ThreadsMax, expected.Processes.ThreadsMax)
+	}
+
+}
@@ -106,6 +106,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
 	"cpu":     {},
 	"cpuacct": {},
 	"memory":  {},
+	"pids":    {},
 	"cpuset":  {},
 	"blkio":   {},
 	"devices": {},
@@ -42,7 +42,7 @@ func cgroupMountsAt(path string, subsystems []string) []cgroups.Mount {
 }
 
 func TestGetCgroupSubsystems(t *testing.T) {
-	ourSubsystems := []string{"cpu,cpuacct", "devices", "memory", "cpuset", "blkio"}
+	ourSubsystems := []string{"cpu,cpuacct", "devices", "memory", "cpuset", "blkio", "pids"}
 
 	testCases := []struct {
 		mounts []cgroups.Mount
@@ -64,6 +64,7 @@ func TestGetCgroupSubsystems(t *testing.T) {
 				"cpuset":  "/sys/fs/cgroup/cpuset",
 				"devices": "/sys/fs/cgroup/devices",
 				"memory":  "/sys/fs/cgroup/memory",
+				"pids":    "/sys/fs/cgroup/pids",
 			},
 			Mounts: cgroupMountsAt("/sys/fs/cgroup", ourSubsystems),
 		},
@@ -80,6 +81,7 @@ func TestGetCgroupSubsystems(t *testing.T) {
 				"cpuset":  "/sys/fs/cgroup/cpuset",
 				"devices": "/sys/fs/cgroup/devices",
 				"memory":  "/sys/fs/cgroup/memory",
+				"pids":    "/sys/fs/cgroup/pids",
 			},
 			Mounts: cgroupMountsAt("/sys/fs/cgroup", ourSubsystems),
 		},
@@ -77,6 +77,9 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
 	pid := 0
 	if isRootCgroup(name) {
 		pid = 1
+
+		// Delete pids from the cgroup paths because /sys/fs/cgroup/pids/pids.current does not exist
+		delete(cgroupPaths, "pids")
 	}
 
 	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)
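The guard above exists because, on a cgroup v1 host, the pids controller does not expose pids.current at the hierarchy root, so reading it for the root container would fail. A quick hedged way to confirm that on a given host (the path assumes a standard cgroup v1 mount):

package main

import (
	"fmt"
	"os"
)

func main() {
	// On cgroup v1, per-cgroup files such as pids.current are present in
	// child cgroups but absent at /sys/fs/cgroup/pids itself.
	_, err := os.Stat("/sys/fs/cgroup/pids/pids.current")
	fmt.Println("pids.current exists at the pids root:", err == nil)
}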
@@ -41,6 +41,10 @@ type MemorySpec struct {
 	SwapLimit uint64 `json:"swap_limit,omitempty"`
 }
 
+type ProcessSpec struct {
+	Limit uint64 `json:"limit,omitempty"`
+}
+
 type ContainerSpec struct {
 	// Time at which the container was created.
 	CreationTime time.Time `json:"creation_time,omitempty"`
@@ -58,6 +62,9 @@ type ContainerSpec struct {
 
 	HasNetwork bool `json:"has_network"`
 
+	HasProcesses bool        `json:"has_processes"`
+	Processes    ProcessSpec `json:"processes,omitempty"`
+
 	HasFilesystem bool `json:"has_filesystem"`
 
 	// HasDiskIo when true, indicates that DiskIo stats will be available.
@@ -563,6 +570,12 @@ type ProcessStats struct {
 
 	// Number of open file descriptors
 	FdCount uint64 `json:"fd_count"`
+
+	// Number of threads currently in container
+	ThreadsCurrent uint64 `json:"threads_current,omitempty"`
+
+	// Maximum number of threads allowed in container
+	ThreadsMax uint64 `json:"threads_max,omitempty"`
 }
 
 type ContainerStats struct {
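Since ThreadsCurrent and ThreadsMax carry omitempty, a zero ThreadsMax (the "unlimited" sentinel) simply disappears from the serialized stats. A small sketch with a trimmed-down copy of the struct (processStats here is illustrative, not the real v1 type):

package main

import (
	"encoding/json"
	"fmt"
)

// processStats copies just the fields relevant here from the v1 ProcessStats.
type processStats struct {
	FdCount        uint64 `json:"fd_count"`
	ThreadsCurrent uint64 `json:"threads_current,omitempty"`
	ThreadsMax     uint64 `json:"threads_max,omitempty"`
}

func main() {
	limited, _ := json.Marshal(processStats{FdCount: 5, ThreadsCurrent: 5, ThreadsMax: 100})
	unlimited, _ := json.Marshal(processStats{FdCount: 5, ThreadsCurrent: 5})
	fmt.Println(string(limited))   // {"fd_count":5,"threads_current":5,"threads_max":100}
	fmt.Println(string(unlimited)) // {"fd_count":5,"threads_current":5} (threads_max omitted when zero)
}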
@@ -630,6 +643,9 @@ func (a *ContainerStats) StatsEq(b *ContainerStats) bool {
 	if !reflect.DeepEqual(a.Network, b.Network) {
 		return false
 	}
+	if !reflect.DeepEqual(a.Processes, b.Processes) {
+		return false
+	}
 	if !reflect.DeepEqual(a.Filesystem, b.Filesystem) {
 		return false
 	}
@@ -90,6 +90,9 @@ type ContainerSpec struct {
 	HasCustomMetrics bool            `json:"has_custom_metrics"`
 	CustomMetrics    []v1.MetricSpec `json:"custom_metrics,omitempty"`
 
+	HasProcesses bool           `json:"has_processes"`
+	Processes    v1.ProcessSpec `json:"processes,omitempty"`
+
 	// Following resources have no associated spec, but are being isolated.
 	HasNetwork    bool `json:"has_network"`
 	HasFilesystem bool `json:"has_filesystem"`
@@ -117,6 +120,9 @@ type DeprecatedContainerStats struct {
 	// Network statistics
 	HasNetwork bool         `json:"has_network"`
 	Network    NetworkStats `json:"network,omitempty"`
+	// Processes statistics
+	HasProcesses bool            `json:"has_processes"`
+	Processes    v1.ProcessStats `json:"processes,omitempty"`
 	// Filesystem statistics
 	HasFilesystem bool         `json:"has_filesystem"`
 	Filesystem    []v1.FsStats `json:"filesystem,omitempty"`
@@ -142,6 +148,8 @@ type ContainerStats struct {
 	Memory *v1.MemoryStats `json:"memory,omitempty"`
 	// Network statistics
 	Network *NetworkStats `json:"network,omitempty"`
+	// Processes statistics
+	Processes *v1.ProcessStats `json:"processes,omitempty"`
 	// Filesystem statistics
 	Filesystem *FilesystemStats `json:"filesystem,omitempty"`
 	// Task load statistics
@@ -124,6 +124,9 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
 				Interfaces: val.Network.Interfaces,
 			}
 		}
+		if spec.HasProcesses {
+			stat.Processes = &val.Processes
+		}
 		if spec.HasFilesystem {
 			if len(val.Filesystem) == 1 {
 				stat.Filesystem = &FilesystemStats{
@@ -180,6 +183,9 @@ func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats {
 		if stat.HasNetwork {
 			stat.Network.Interfaces = val.Network.Interfaces
 		}
+		if stat.HasProcesses {
+			stat.Processes = val.Processes
+		}
 		if stat.HasFilesystem {
 			stat.Filesystem = val.Filesystem
 		}
@@ -255,6 +261,7 @@ func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace s
 		HasMemory:        specV1.HasMemory,
 		HasFilesystem:    specV1.HasFilesystem,
 		HasNetwork:       specV1.HasNetwork,
+		HasProcesses:     specV1.HasProcesses,
 		HasDiskIo:        specV1.HasDiskIo,
 		HasCustomMetrics: specV1.HasCustomMetrics,
 		Image:            specV1.Image,
@@ -47,6 +47,7 @@ func TestContanierSpecFromV1(t *testing.T) {
 			SwapLimit:   8192,
 		},
 		HasNetwork:       true,
+		HasProcesses:     true,
 		HasFilesystem:    true,
 		HasDiskIo:        true,
 		HasCustomMetrics: true,
@@ -79,6 +80,7 @@ func TestContanierSpecFromV1(t *testing.T) {
 			SwapLimit:   8192,
 		},
 		HasNetwork:       true,
+		HasProcesses:     true,
 		HasFilesystem:    true,
 		HasDiskIo:        true,
 		HasCustomMetrics: true,
@@ -116,6 +118,7 @@ func TestContainerStatsFromV1(t *testing.T) {
 			SwapLimit:   8192,
 		},
 		HasNetwork:       true,
+		HasProcesses:     true,
 		HasFilesystem:    true,
 		HasDiskIo:        true,
 		HasCustomMetrics: true,
@@ -168,6 +171,12 @@ func TestContainerStatsFromV1(t *testing.T) {
 				TxDropped: 80,
 			}},
 		},
+		Processes: v1.ProcessStats{
+			ProcessCount:   5,
+			FdCount:        1,
+			ThreadsCurrent: 66,
+			ThreadsMax:     6000,
+		},
 		Filesystem: []v1.FsStats{{
 			Device: "dev0",
 			Limit:  500,
@@ -190,6 +199,7 @@ func TestContainerStatsFromV1(t *testing.T) {
 		Cpu:       &v1Stats.Cpu,
 		DiskIo:    &v1Stats.DiskIo,
 		Memory:    &v1Stats.Memory,
+		Processes: &v1Stats.Processes,
 		Network: &NetworkStats{
 			Interfaces: v1Stats.Network.Interfaces,
 		},
@@ -1045,7 +1045,34 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
 					return metricValues{{value: float64(s.Processes.FdCount), timestamp: s.Timestamp}}
 				},
 			},
+			{
+				name:      "container_threads_max",
+				help:      "Maximum number of threads allowed inside the container, infinity if value is zero",
+				valueType: prometheus.GaugeValue,
+				getValues: func(s *info.ContainerStats) metricValues {
+					return metricValues{
+						{
+							value:     float64(s.Processes.ThreadsMax),
+							timestamp: s.Timestamp,
+						},
+					}
+				},
+			},
+			{
+				name:      "container_threads",
+				help:      "Number of threads running inside the container",
+				valueType: prometheus.GaugeValue,
+				getValues: func(s *info.ContainerStats) metricValues {
+					return metricValues{
+						{
+							value:     float64(s.Processes.ThreadsCurrent),
+							timestamp: s.Timestamp,
+						},
+					}
+				},
+			},
 		}...)
 
 	}
 
 	return c
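Per the help string, container_threads_max exports the raw limit and leaves 0 as the "no limit" marker rather than publishing +Inf. A hedged sketch of how a consumer of the metric might apply that convention (threadsMaxGauge is an illustrative name, not part of this change):

package main

import (
	"fmt"
	"math"
)

// threadsMaxGauge maps the exported "zero means unlimited" convention
// to an explicit +Inf for display or alerting purposes.
func threadsMaxGauge(threadsMax uint64) float64 {
	if threadsMax == 0 {
		return math.Inf(1)
	}
	return float64(threadsMax)
}

func main() {
	fmt.Println(threadsMaxGauge(0))   // +Inf (no pid limit configured)
	fmt.Println(threadsMaxGauge(100)) // 100
}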
@@ -83,6 +83,10 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 				Reservation: 1024,
 				SwapLimit:   4096,
 			},
+			HasProcesses: true,
+			Processes: info.ProcessSpec{
+				Limit: 100,
+			},
 			CreationTime: time.Unix(1257894000, 0),
 			Labels: map[string]string{
 				"foo.label": "bar",
@@ -251,8 +255,10 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 					},
 				},
 				Processes: info.ProcessStats{
-					ProcessCount: 1,
-					FdCount:      5,
+					ProcessCount:   1,
+					FdCount:        5,
+					ThreadsCurrent: 5,
+					ThreadsMax:     100,
 				},
 				TaskStats: info.LoadStats{
 					NrSleeping: 50,
metrics/testdata/prometheus_metrics (vendored, 6 changed lines)
@@ -229,6 +229,12 @@ container_tasks_state{container_env_foo_env="prod",container_label_foo_label="ba
 container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="sleeping",zone_name="hello"} 50 1395066363000
 container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="stopped",zone_name="hello"} 52 1395066363000
 container_tasks_state{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",state="uninterruptible",zone_name="hello"} 53 1395066363000
+# HELP container_threads Number of threads running inside the container
+# TYPE container_threads gauge
+container_threads{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 5 1395066363000
+# HELP container_threads_max Maximum number of threads allowed inside the container, infinity if value is zero
+# TYPE container_threads_max gauge
+container_threads_max{container_env_foo_env="prod",container_label_foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 100 1395066363000
 # HELP machine_cpu_cores Number of CPU cores on the machine.
 # TYPE machine_cpu_cores gauge
 machine_cpu_cores 4