Merge pull request #969 from f0/master
add support for cgroup CPUQuota and CPUPeriod prometheus limit
commit a8946729de
@@ -207,6 +207,16 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
 		if utils.FileExists(cpuRoot) {
 			spec.HasCpu = true
 			spec.Cpu.Limit = readUInt64(cpuRoot, "cpu.shares")
+			spec.Cpu.Period = readUInt64(cpuRoot, "cpu.cfs_period_us")
+			quota := readString(cpuRoot, "cpu.cfs_quota_us")
+
+			if quota != "-1" {
+				val, err := strconv.ParseUint(quota, 10, 64)
+				if err != nil {
+					glog.Errorf("raw driver: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
+				}
+				spec.Cpu.Quota = val
+			}
 		}
 	}

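For readers unfamiliar with the two cgroup files read above: cpu.cfs_period_us and cpu.cfs_quota_us are the CFS bandwidth-control knobs, both in microseconds, and quota divided by period gives the number of CPUs the container may consume (the kernel reports -1 when no quota is set, which is why the diff skips that case). The standalone sketch below is not part of this commit and uses a made-up helper name; it only illustrates the arithmetic.

    package main

    import "fmt"

    // effectiveCPUs is an illustrative helper (not cAdvisor code): given the values
    // read from cpu.cfs_quota_us and cpu.cfs_period_us (both in microseconds), it
    // returns how many CPUs the cgroup may use. A quota of 0 here stands for
    // "no quota set", matching how the spec leaves Quota at its zero value.
    func effectiveCPUs(quotaUs, periodUs uint64) (float64, bool) {
        if quotaUs == 0 || periodUs == 0 {
            return 0, false // quota disabled or period unknown
        }
        return float64(quotaUs) / float64(periodUs), true
    }

    func main() {
        // e.g. a 50000us quota over a 100000us period limits the container to 0.5 CPUs.
        if cpus, ok := effectiveCPUs(50000, 100000); ok {
            fmt.Printf("container limited to %.2f CPUs\n", cpus)
        }
    }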
@@ -23,6 +23,8 @@ type CpuSpec struct {
 	Limit    uint64 `json:"limit"`
 	MaxLimit uint64 `json:"max_limit"`
 	Mask     string `json:"mask,omitempty"`
+	Quota    uint64 `json:"quota,omitempty"`
+	Period   uint64 `json:"period,omitempty"`
 }

 type MemorySpec struct {
@@ -36,6 +36,10 @@ type CpuSpec struct {
 	// Cpu affinity mask.
 	// TODO(rjnagal): Add a library to convert mask string to set of cpu bitmask.
 	Mask string `json:"mask,omitempty"`
+	// CPUQuota. Default is disabled.
+	Quota uint64 `json:"quota,omitempty"`
+	// Period is the CPU CFS period in microseconds; the quota is enforced against this period.
+	Period uint64 `json:"period,omitempty"`
 }

 type MemorySpec struct {
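A side note on the new `json:"quota,omitempty"` and `json:"period,omitempty"` tags: API consumers only see these fields when they are non-zero. A minimal standalone illustration follows, using a local struct that mirrors the fields above rather than the real info.CpuSpec.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // cpuSpec mirrors the fields shown in the diff purely for illustration;
    // real callers would use the info.CpuSpec type from cAdvisor.
    type cpuSpec struct {
        Limit  uint64 `json:"limit"`
        Quota  uint64 `json:"quota,omitempty"`
        Period uint64 `json:"period,omitempty"`
    }

    func main() {
        withQuota, _ := json.Marshal(cpuSpec{Limit: 1024, Quota: 50000, Period: 100000})
        noQuota, _ := json.Marshal(cpuSpec{Limit: 1024})
        fmt.Println(string(withQuota)) // {"limit":1024,"quota":50000,"period":100000}
        fmt.Println(string(noQuota))   // {"limit":1024} -- quota/period omitted when unset
    }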
@@ -542,10 +542,16 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
 		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)

 		if container.Spec.HasCpu {
+			desc = prometheus.NewDesc("container_spec_cpu_period", "CPU period of the container.", baseLabels, nil)
+			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Period), baseLabelValues...)
+			if container.Spec.Cpu.Quota != 0 {
+				desc = prometheus.NewDesc("container_spec_cpu_quota", "CPU quota of the container.", baseLabels, nil)
+				ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Quota), baseLabelValues...)
+			}
 			desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", baseLabels, nil)
 			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), baseLabelValues...)
 		}

 		if container.Spec.HasMemory {
 			desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", baseLabels, nil)
 			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), baseLabelValues...)
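The collector change above reuses the standard client_golang pattern of building a prometheus.Desc and emitting a constant metric on every scrape. For reference, here is a self-contained sketch of that pattern; the metric name, label value, and collector type are made up for the example and are not cAdvisor code.

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    // quotaDesc and quotaCollector are illustrative only; cAdvisor's real collector
    // builds its descriptors inside collectContainersInfo as shown in the diff.
    var quotaDesc = prometheus.NewDesc(
        "example_spec_cpu_quota",              // metric name (made up)
        "CPU quota of the example container.", // help text
        []string{"name"},                      // variable label, filled per metric
        nil,                                   // no constant labels
    )

    type quotaCollector struct {
        lookupQuota func() float64 // e.g. read from a container spec at scrape time
    }

    func (c *quotaCollector) Describe(ch chan<- *prometheus.Desc) { ch <- quotaDesc }

    func (c *quotaCollector) Collect(ch chan<- prometheus.Metric) {
        // A fresh const metric is emitted on every scrape, mirroring the
        // MustNewConstMetric calls in the diff above.
        ch <- prometheus.MustNewConstMetric(quotaDesc, prometheus.GaugeValue, c.lookupQuota(), "testcontainer")
    }

    func main() {
        prometheus.MustRegister(&quotaCollector{lookupQuota: func() float64 { return 10000 }})
        fmt.Println("collector registered; expose it via promhttp.Handler() in a real program")
    }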
@@ -55,7 +55,13 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 				Aliases: []string{"testcontaineralias"},
 			},
 			Spec: info.ContainerSpec{
-				Image: "test",
+				Image:  "test",
+				HasCpu: true,
+				Cpu: info.CpuSpec{
+					Limit:  1000,
+					Period: 10,
+					Quota:  10000,
+				},
 				CreationTime: time.Unix(1257894000, 0),
 				Labels: map[string]string{
 					"foo.label": "bar",
metrics/testdata/prometheus_metrics (vendored): 11 changes
@@ -116,6 +116,15 @@ container_network_transmit_packets_total{foo_env="prod",foo_label="bar",id="test
 # HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
 # TYPE container_scrape_error gauge
 container_scrape_error 0
+# HELP container_spec_cpu_period CPU period of the container.
+# TYPE container_spec_cpu_period gauge
+container_spec_cpu_period{foo_env="prod",foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 10
+# HELP container_spec_cpu_quota CPU quota of the container.
+# TYPE container_spec_cpu_quota gauge
+container_spec_cpu_quota{foo_env="prod",foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 10000
+# HELP container_spec_cpu_shares CPU share of the container.
+# TYPE container_spec_cpu_shares gauge
+container_spec_cpu_shares{foo_env="prod",foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1000
 # HELP container_start_time_seconds Start time of the container since unix epoch in seconds.
 # TYPE container_start_time_seconds gauge
 container_start_time_seconds{foo_env="prod",foo_label="bar",id="testcontainer",image="test",name="testcontaineralias",zone_name="hello"} 1.257894e+09
@@ -173,4 +182,4 @@ process_resident_memory_bytes 7.74144e+06
 process_start_time_seconds 1.42620369439e+09
 # HELP process_virtual_memory_bytes Virtual memory size in bytes.
 # TYPE process_virtual_memory_bytes gauge
-process_virtual_memory_bytes 1.16420608e+08
+process_virtual_memory_bytes 1.16420608e+08