Merge pull request #1887 from dashpole/refactor
Refactor handlers to move common code to libcontainer handler
commit 8ec51bb848
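The change below removes the per-runtime copies of `GetStats`/`GetProcesses` (and the `cgroupManager`/`rootFs`/`pid` fields) from the containerd, CRI-O, and Docker handlers and replaces them with a shared `Handler` in `container/libcontainer`. A minimal sketch of the resulting usage pattern is shown here; only `NewHandler`, `GetStats`, and `GetProcesses` come from this change, while the `cgroupfs.Manager` construction, paths, and PID are illustrative assumptions modeled on the existing handlers:

```go
package main

import (
	"fmt"

	"github.com/google/cadvisor/container"
	containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
)

// buildHandler mirrors what each runtime handler now does: build one shared
// libcontainer Handler instead of carrying cgroupManager/rootFs/pid itself.
// The cgroup paths and pid passed in are placeholders for illustration.
func buildHandler(cgroupPaths map[string]string, pid int) *containerlibcontainer.Handler {
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{Name: "example"},
		Paths:   cgroupPaths,
	}
	ignoreMetrics := container.MetricSet{}
	return containerlibcontainer.NewHandler(cgroupManager, "/rootfs", pid, ignoreMetrics)
}

func main() {
	h := buildHandler(map[string]string{"cpu": "/sys/fs/cgroup/cpu/example"}, 1234)
	if stats, err := h.GetStats(); err == nil {
		fmt.Printf("cpu total usage: %d\n", stats.Cpu.Usage.Total)
	}
	if pids, err := h.GetProcesses(); err == nil {
		fmt.Printf("pids: %v\n", pids)
	}
}
```

Each runtime handler now stores only the `*containerlibcontainer.Handler` plus its `info.ContainerReference`, and delegates stats collection and process listing to the shared handler.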
@@ -20,9 +20,7 @@ import (
 	"fmt"
 	"path"
 	"strings"
-	"time"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"golang.org/x/net/context"
@@ -36,31 +34,21 @@ import (
 )
 
 type containerdContainerHandler struct {
-	client  containerdClient
-	name    string
-	id      string
-	aliases []string
 	machineInfoFactory info.MachineInfoFactory
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
 	fsInfo fs.FsInfo
-	poolName string
-	// Time at which this container was created.
-	creationTime time.Time
 	// Metadata associated with the container.
-	labels map[string]string
+	reference info.ContainerReference
 	envs map[string]string
-	// The container PID used to switch namespaces as required
-	pid int
+	labels map[string]string
 	// Image name used for this container.
 	image string
-	// The host root FS to read
-	rootFs string
 	// Filesystem handler.
 	ignoreMetrics container.MetricSet
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &containerdContainerHandler{}
@@ -112,25 +100,27 @@ func newContainerdContainerHandler(
 		rootfs = "/rootfs"
 	}
 
+	containerReference := info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Namespace: k8sContainerdNamespace,
+		Aliases:   []string{id, name},
+	}
+
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), ignoreMetrics)
+
 	handler := &containerdContainerHandler{
-		id:                 id,
-		client:             client,
-		name:               name,
 		machineInfoFactory: machineInfoFactory,
 		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		rootFs:             rootfs,
 		fsInfo:             fsInfo,
 		envs:               make(map[string]string),
-		labels:             make(map[string]string),
+		labels:              cntr.Labels,
 		ignoreMetrics:      ignoreMetrics,
-		pid:                int(taskPid),
-		creationTime:       cntr.CreatedAt,
+		reference:           containerReference,
+		libcontainerHandler: libcontainerHandler,
 	}
 	// Add the name and bare ID as aliases of the container.
-	handler.labels = cntr.Labels
 	handler.image = cntr.Image
-	handler.aliases = []string{id, name}
 	for _, envVar := range spec.Process.Env {
 		if envVar != "" {
 			splits := strings.SplitN(envVar, "=", 2)
@@ -144,12 +134,7 @@ func newContainerdContainerHandler(
 }
 
 func (self *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Namespace: k8sContainerdNamespace,
-		Aliases:   self.aliases,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *containerdContainerHandler) needNet() bool {
@@ -188,7 +173,7 @@ func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) e
 }
 
 func (self *containerdContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -212,7 +197,7 @@ func (self *containerdContainerHandler) ListContainers(listType container.ListTy
 func (self *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -222,7 +207,7 @@ func (self *containerdContainerHandler) GetContainerLabels() map[string]string {
 }
 
 func (self *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *containerdContainerHandler) Exists() bool {
@@ -21,7 +21,6 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"time"
 
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
@@ -29,48 +28,32 @@ import (
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 )
 
 type crioContainerHandler struct {
-	name    string
-	id      string
-	aliases []string
 	machineInfoFactory info.MachineInfoFactory
 
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
 
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
-
 	// the CRI-O storage driver
 	storageDriver    storageDriver
 	fsInfo           fs.FsInfo
 	rootfsStorageDir string
 
-	// Time at which this container was created.
-	creationTime time.Time
-
 	// Metadata associated with the container.
-	labels map[string]string
 	envs map[string]string
+	labels map[string]string
 
 	// TODO
 	// crio version handling...
 
-	// The container PID used to switch namespaces as required
-	pid int
-
 	// Image name used for this container.
 	image string
 
-	// The host root FS to read
-	rootFs string
-
 	// The network mode of the container
 	// TODO
 
@@ -84,6 +67,10 @@ type crioContainerHandler struct {
 
 	// container restart count
 	restartCount int
+
+	reference info.ContainerReference
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &crioContainerHandler{}
@@ -150,25 +137,29 @@ func newCrioContainerHandler(
 		rootfsStorageDir = filepath.Join(rootfsStorageDir, "diff")
 	}
 
-	// TODO: extract object mother method
-	handler := &crioContainerHandler{
-		id:                 id,
-		name:               name,
-		machineInfoFactory: machineInfoFactory,
-		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		storageDriver:      storageDriver,
-		fsInfo:             fsInfo,
-		rootFs:             rootFs,
-		rootfsStorageDir:   rootfsStorageDir,
-		envs:               make(map[string]string),
-		ignoreMetrics:      ignoreMetrics,
+	containerReference := info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Aliases:   []string{cInfo.Name, id},
+		Namespace: CrioNamespace,
+	}
+
+	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, ignoreMetrics)
+
+	// TODO: extract object mother method
+	handler := &crioContainerHandler{
+		machineInfoFactory:  machineInfoFactory,
+		cgroupPaths:         cgroupPaths,
+		storageDriver:       storageDriver,
+		fsInfo:              fsInfo,
+		rootfsStorageDir:    rootfsStorageDir,
+		envs:                make(map[string]string),
+		labels:              cInfo.Labels,
+		ignoreMetrics:       ignoreMetrics,
+		reference:           containerReference,
+		libcontainerHandler: libcontainerHandler,
 	}
 
-	handler.creationTime = time.Unix(0, cInfo.CreatedTime)
-	handler.pid = cInfo.Pid
-	handler.aliases = append(handler.aliases, cInfo.Name, id)
-	handler.labels = cInfo.Labels
 	handler.image = cInfo.Image
 	// TODO: we wantd to know graph driver DeviceId (dont think this is needed now)
 
@@ -204,12 +195,7 @@ func (self *crioContainerHandler) Cleanup() {
 }
 
 func (self *crioContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Aliases:   self.aliases,
-		Namespace: CrioNamespace,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *crioContainerHandler) needNet() bool {
@@ -285,7 +271,7 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
 }
 
 func (self *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -314,7 +300,7 @@ func (self *crioContainerHandler) ListContainers(listType container.ListType) ([
 func (self *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -328,7 +314,7 @@ func (self *crioContainerHandler) GetContainerIPAddress() string {
 }
 
 func (self *crioContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *crioContainerHandler) Exists() bool {
@@ -35,7 +35,6 @@ import (
 	dockercontainer "github.com/docker/docker/api/types/container"
 	docker "github.com/docker/docker/client"
 	"github.com/golang/glog"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"golang.org/x/net/context"
@@ -52,53 +51,29 @@ const (
 )
 
 type dockerContainerHandler struct {
-	client  *docker.Client
-	name    string
-	id      string
-	aliases []string
+	// machineInfoFactory provides info.MachineInfo
 	machineInfoFactory info.MachineInfoFactory
 
 	// Absolute path to the cgroup hierarchies of this container.
 	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
 	cgroupPaths map[string]string
 
-	// Manager of this container's cgroups.
-	cgroupManager cgroups.Manager
-
 	// the docker storage driver
 	storageDriver    storageDriver
 	fsInfo           fs.FsInfo
 	rootfsStorageDir string
 
-	// devicemapper state
-
-	// the devicemapper poolname
-	poolName string
-	// the devicemapper device id for the container
-	deviceID string
-
-	// zfs Filesystem
-	zfsFilesystem string
-
-	// zfsParent is the parent for docker zfs
-	zfsParent string
-
 	// Time at which this container was created.
 	creationTime time.Time
 
 	// Metadata associated with the container.
-	labels map[string]string
 	envs map[string]string
-	// The container PID used to switch namespaces as required
-	pid int
+	labels map[string]string
 
 	// Image name used for this container.
 	image string
 
-	// The host root FS to read
-	rootFs string
-
 	// The network mode of the container
 	networkMode dockercontainer.NetworkMode
 
@@ -110,14 +85,19 @@ type dockerContainerHandler struct {
 
 	ignoreMetrics container.MetricSet
 
-	// thin pool watcher
-	thinPoolWatcher *devicemapper.ThinPoolWatcher
+	// the devicemapper poolname
+	poolName string
 
-	// zfs watcher
-	zfsWatcher *zfs.ZfsWatcher
+	// zfsParent is the parent for docker zfs
+	zfsParent string
 
 	// container restart count
 	restartCount int
+
+	// Reference to the container
+	reference info.ContainerReference
+
+	libcontainerHandler *containerlibcontainer.Handler
 }
 
 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -210,46 +190,42 @@ func newDockerContainerHandler(
 		zfsFilesystem = path.Join(zfsParent, rwLayerID)
 	}
 
-	// TODO: extract object mother method
-	handler := &dockerContainerHandler{
-		id:                 id,
-		client:             client,
-		name:               name,
-		machineInfoFactory: machineInfoFactory,
-		cgroupPaths:        cgroupPaths,
-		cgroupManager:      cgroupManager,
-		storageDriver:      storageDriver,
-		fsInfo:             fsInfo,
-		rootFs:             rootFs,
-		poolName:           thinPoolName,
-		zfsFilesystem:      zfsFilesystem,
-		rootfsStorageDir:   rootfsStorageDir,
-		envs:               make(map[string]string),
-		ignoreMetrics:      ignoreMetrics,
-		thinPoolWatcher:    thinPoolWatcher,
-		zfsWatcher:         zfsWatcher,
-		zfsParent:          zfsParent,
-	}
-
 	// We assume that if Inspect fails then the container is not known to docker.
 	ctnr, err := client.ContainerInspect(context.Background(), id)
 	if err != nil {
 		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
 	}
 
+	// TODO: extract object mother method
+	handler := &dockerContainerHandler{
+		machineInfoFactory: machineInfoFactory,
+		cgroupPaths:        cgroupPaths,
+		fsInfo:             fsInfo,
+		storageDriver:      storageDriver,
+		poolName:           thinPoolName,
+		rootfsStorageDir:   rootfsStorageDir,
+		envs:               make(map[string]string),
+		labels:             ctnr.Config.Labels,
+		ignoreMetrics:      ignoreMetrics,
+		zfsParent:          zfsParent,
+	}
 	// Timestamp returned by Docker is in time.RFC3339Nano format.
 	handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
 	if err != nil {
 		// This should not happen, report the error just in case
 		return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
 	}
-	handler.pid = ctnr.State.Pid
+	handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, ignoreMetrics)
 
 	// Add the name and bare ID as aliases of the container.
-	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
-	handler.labels = ctnr.Config.Labels
+	handler.reference = info.ContainerReference{
+		Id:        id,
+		Name:      name,
+		Aliases:   []string{strings.TrimPrefix(ctnr.Name, "/"), id},
+		Namespace: DockerNamespace,
+	}
 	handler.image = ctnr.Config.Image
 	handler.networkMode = ctnr.HostConfig.NetworkMode
-	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
 	handler.restartCount = ctnr.RestartCount
 
 	// Obtain the IP address for the contianer.
@@ -273,7 +249,7 @@ func newDockerContainerHandler(
 			fsHandler:       common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo),
 			thinPoolWatcher: thinPoolWatcher,
 			zfsWatcher:      zfsWatcher,
-			deviceID:        handler.deviceID,
+			deviceID:        ctnr.GraphDriver.Data["DeviceId"],
 			zfsFilesystem:   zfsFilesystem,
 		}
 	}
@@ -365,12 +341,7 @@ func (self *dockerContainerHandler) Cleanup() {
 }
 
 func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {
-	return info.ContainerReference{
-		Id:        self.id,
-		Name:      self.name,
-		Aliases:   self.aliases,
-		Namespace: DockerNamespace,
-	}, nil
+	return self.reference, nil
 }
 
 func (self *dockerContainerHandler) needNet() bool {
@@ -454,7 +425,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 
 // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
 func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
-	stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
+	stats, err := self.libcontainerHandler.GetStats()
 	if err != nil {
 		return stats, err
 	}
@@ -483,7 +454,7 @@ func (self *dockerContainerHandler) ListContainers(listType container.ListType)
 func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
 	path, ok := self.cgroupPaths[resource]
 	if !ok {
-		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name)
+		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.reference.Name)
 	}
 	return path, nil
 }
@@ -497,7 +468,7 @@ func (self *dockerContainerHandler) GetContainerIPAddress() string {
 }
 
 func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
-	return containerlibcontainer.GetProcesses(self.cgroupManager)
+	return self.libcontainerHandler.GetProcesses()
 }
 
 func (self *dockerContainerHandler) Exists() bool {

container/libcontainer/handler.go (new file, 507 lines)
@@ -0,0 +1,507 @@

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package libcontainer

import (
    "bufio"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/google/cadvisor/container"
    info "github.com/google/cadvisor/info/v1"

    "github.com/golang/glog"
    "github.com/opencontainers/runc/libcontainer"
    "github.com/opencontainers/runc/libcontainer/cgroups"
)

/*
#include <unistd.h>
*/
import "C"

type Handler struct {
    cgroupManager cgroups.Manager
    rootFs        string
    pid           int
    ignoreMetrics container.MetricSet
}

func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
    return &Handler{
        cgroupManager: cgroupManager,
        rootFs:        rootFs,
        pid:           pid,
        ignoreMetrics: ignoreMetrics,
    }
}

// Get cgroup and networking stats of the specified container
func (h *Handler) GetStats() (*info.ContainerStats, error) {
    cgroupStats, err := h.cgroupManager.GetStats()
    if err != nil {
        return nil, err
    }
    libcontainerStats := &libcontainer.Stats{
        CgroupStats: cgroupStats,
    }
    withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics)
    stats := newContainerStats(libcontainerStats, withPerCPU)

    // If we know the pid then get network stats from /proc/<pid>/net/dev
    if h.pid == 0 {
        return stats, nil
    }
    if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) {
        netStats, err := networkStatsFromProc(h.rootFs, h.pid)
        if err != nil {
            glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
        } else {
            stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
        }
    }
    if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
        t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
        if err != nil {
            glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
        } else {
            stats.Network.Tcp = t
        }

        t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
        if err != nil {
            glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
        } else {
            stats.Network.Tcp6 = t6
        }
    }
    if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
        u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
        if err != nil {
            glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
        } else {
            stats.Network.Udp = u
        }

        u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
        if err != nil {
            glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
        } else {
            stats.Network.Udp6 = u6
        }
    }

    // For backwards compatibility.
    if len(stats.Network.Interfaces) > 0 {
        stats.Network.InterfaceStats = stats.Network.Interfaces[0]
    }

    return stats, nil
}

func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
    netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")

    ifaceStats, err := scanInterfaceStats(netStatsFile)
    if err != nil {
        return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
    }

    return ifaceStats, nil
}

var (
    ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
)

func isIgnoredDevice(ifName string) bool {
    for _, prefix := range ignoredDevicePrefixes {
        if strings.HasPrefix(strings.ToLower(ifName), prefix) {
            return true
        }
    }
    return false
}

func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
    file, err := os.Open(netStatsFile)
    if err != nil {
        return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)

    // Discard header lines
    for i := 0; i < 2; i++ {
        if b := scanner.Scan(); !b {
            return nil, scanner.Err()
        }
    }

    stats := []info.InterfaceStats{}
    for scanner.Scan() {
        line := scanner.Text()
        line = strings.Replace(line, ":", "", -1)

        fields := strings.Fields(line)
        // If the format of the line is invalid then don't trust any of the stats
        // in this file.
        if len(fields) != 17 {
            return nil, fmt.Errorf("invalid interface stats line: %v", line)
        }

        devName := fields[0]
        if isIgnoredDevice(devName) {
            continue
        }

        i := info.InterfaceStats{
            Name: devName,
        }

        statFields := append(fields[1:5], fields[9:13]...)
        statPointers := []*uint64{
            &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
            &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
        }

        err := setInterfaceStatValues(statFields, statPointers)
        if err != nil {
            return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
        }

        stats = append(stats, i)
    }

    return stats, nil
}

func setInterfaceStatValues(fields []string, pointers []*uint64) error {
    for i, v := range fields {
        val, err := strconv.ParseUint(v, 10, 64)
        if err != nil {
            return err
        }
        *pointers[i] = val
    }
    return nil
}

func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
    tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

    tcpStats, err := scanTcpStats(tcpStatsFile)
    if err != nil {
        return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
    }

    return tcpStats, nil
}

func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {

    var stats info.TcpStat

    data, err := ioutil.ReadFile(tcpStatsFile)
    if err != nil {
        return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
    }

    tcpStateMap := map[string]uint64{
        "01": 0, //ESTABLISHED
        "02": 0, //SYN_SENT
        "03": 0, //SYN_RECV
        "04": 0, //FIN_WAIT1
        "05": 0, //FIN_WAIT2
        "06": 0, //TIME_WAIT
        "07": 0, //CLOSE
        "08": 0, //CLOSE_WAIT
        "09": 0, //LAST_ACK
        "0A": 0, //LISTEN
        "0B": 0, //CLOSING
    }

    reader := strings.NewReader(string(data))
    scanner := bufio.NewScanner(reader)

    scanner.Split(bufio.ScanLines)

    // Discard header line
    if b := scanner.Scan(); !b {
        return stats, scanner.Err()
    }

    for scanner.Scan() {
        line := scanner.Text()

        state := strings.Fields(line)
        // TCP state is the 4th field.
        // Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
        tcpState := state[3]
        _, ok := tcpStateMap[tcpState]
        if !ok {
            return stats, fmt.Errorf("invalid TCP stats line: %v", line)
        }
        tcpStateMap[tcpState]++
    }

    stats = info.TcpStat{
        Established: tcpStateMap["01"],
        SynSent:     tcpStateMap["02"],
        SynRecv:     tcpStateMap["03"],
        FinWait1:    tcpStateMap["04"],
        FinWait2:    tcpStateMap["05"],
        TimeWait:    tcpStateMap["06"],
        Close:       tcpStateMap["07"],
        CloseWait:   tcpStateMap["08"],
        LastAck:     tcpStateMap["09"],
        Listen:      tcpStateMap["0A"],
        Closing:     tcpStateMap["0B"],
    }

    return stats, nil
}

func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
    var err error
    var udpStats info.UdpStat

    udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

    r, err := os.Open(udpStatsFile)
    if err != nil {
        return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
    }

    udpStats, err = scanUdpStats(r)
    if err != nil {
        return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
    }

    return udpStats, nil
}

func scanUdpStats(r io.Reader) (info.UdpStat, error) {
    var stats info.UdpStat

    scanner := bufio.NewScanner(r)
    scanner.Split(bufio.ScanLines)

    // Discard header line
    if b := scanner.Scan(); !b {
        return stats, scanner.Err()
    }

    listening := uint64(0)
    dropped := uint64(0)
    rxQueued := uint64(0)
    txQueued := uint64(0)

    for scanner.Scan() {
        line := scanner.Text()
        // Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops

        listening++

        fs := strings.Fields(line)
        if len(fs) != 13 {
            continue
        }

        rx, tx := uint64(0), uint64(0)
        fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
        rxQueued += rx
        txQueued += tx

        d, err := strconv.Atoi(string(fs[12]))
        if err != nil {
            continue
        }
        dropped += uint64(d)
    }

    stats = info.UdpStat{
        Listen:   listening,
        Dropped:  dropped,
        RxQueued: rxQueued,
        TxQueued: txQueued,
    }

    return stats, nil
}

func (h *Handler) GetProcesses() ([]int, error) {
    pids, err := h.cgroupManager.GetPids()
    if err != nil {
        return nil, err
    }
    return pids, nil
}

func minUint32(x, y uint32) uint32 {
    if x < y {
        return x
    }
    return y
}

// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs

// Convert libcontainer stats to info.ContainerStats.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
    ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
    ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
    ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
    ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
    ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
    ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime

    if !withPerCPU {
        return
    }
    if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
        // libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
        // cpuacct subsystem.
        return
    }

    numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
    // Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
    // the percpu usage information includes extra zero values for all additional
    // possible CPUs. This is to allow statistic collection after CPU-hotplug.
    // We intentionally ignore these extra zeroes.
    numActual, err := numCpusFunc()
    if err != nil {
        glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
        numActual = numPossible
    }
    if numActual > numPossible {
        // The real number of cores should never be greater than the number of
        // datapoints reported in cpu usage.
        glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
    }
    numActual = minUint32(numPossible, numActual)
    ret.Cpu.Usage.PerCpu = make([]uint64, numActual)

    for i := uint32(0); i < numActual; i++ {
        ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
    }

}

// Copied from
// https://github.com/moby/moby/blob/8b1adf55c2af329a4334f21d9444d6a169000c81/daemon/stats/collector_unix.go#L73
// Apache 2.0, Copyright Docker, Inc.
func getNumberOnlineCPUs() (uint32, error) {
    i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
    // According to POSIX - errno is undefined after successful
    // sysconf, and can be non-zero in several cases, so look for
    // error in returned value not in errno.
    // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
    if i == -1 {
        return 0, err
    }
    return uint32(i), nil
}

func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
    ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
    ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
    ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
    ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
    ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
    ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
    ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
    ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
}

func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
    ret.Memory.Usage = s.MemoryStats.Usage.Usage
    ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
    ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
    ret.Memory.Cache = s.MemoryStats.Stats["cache"]

    if s.MemoryStats.UseHierarchy {
        ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
        ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
    } else {
        ret.Memory.RSS = s.MemoryStats.Stats["rss"]
        ret.Memory.Swap = s.MemoryStats.Stats["swap"]
    }
    if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
        ret.Memory.ContainerData.Pgfault = v
        ret.Memory.HierarchicalData.Pgfault = v
    }
    if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
        ret.Memory.ContainerData.Pgmajfault = v
        ret.Memory.HierarchicalData.Pgmajfault = v
    }

    workingSet := ret.Memory.Usage
    if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
        if workingSet < v {
            workingSet = 0
        } else {
            workingSet -= v
        }
    }
    ret.Memory.WorkingSet = workingSet
}

func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
    ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
    for i := range libcontainerStats.Interfaces {
        ret.Network.Interfaces[i] = info.InterfaceStats{
            Name:      libcontainerStats.Interfaces[i].Name,
            RxBytes:   libcontainerStats.Interfaces[i].RxBytes,
            RxPackets: libcontainerStats.Interfaces[i].RxPackets,
            RxErrors:  libcontainerStats.Interfaces[i].RxErrors,
            RxDropped: libcontainerStats.Interfaces[i].RxDropped,
            TxBytes:   libcontainerStats.Interfaces[i].TxBytes,
            TxPackets: libcontainerStats.Interfaces[i].TxPackets,
            TxErrors:  libcontainerStats.Interfaces[i].TxErrors,
            TxDropped: libcontainerStats.Interfaces[i].TxDropped,
        }
    }

    // Add to base struct for backwards compatibility.
    if len(ret.Network.Interfaces) > 0 {
        ret.Network.InterfaceStats = ret.Network.Interfaces[0]
    }
}

func newContainerStats(libcontainerStats *libcontainer.Stats, withPerCPU bool) *info.ContainerStats {
    ret := &info.ContainerStats{
        Timestamp: time.Now(),
    }

    if s := libcontainerStats.CgroupStats; s != nil {
        setCpuStats(s, ret, withPerCPU)
        setDiskIoStats(s, ret)
        setMemoryStats(s, ret)
    }
    if len(libcontainerStats.Interfaces) > 0 {
        setNetworkStats(libcontainerStats, ret)
    }
    return ret
}

container/libcontainer/handler_test.go (new file, 136 lines)
@@ -0,0 +1,136 @@

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package libcontainer

import (
    "os"
    "testing"

    info "github.com/google/cadvisor/info/v1"
    "github.com/opencontainers/runc/libcontainer/cgroups"
    "github.com/opencontainers/runc/libcontainer/system"
)

func TestScanInterfaceStats(t *testing.T) {
    stats, err := scanInterfaceStats("testdata/procnetdev")
    if err != nil {
        t.Error(err)
    }

    var netdevstats = []info.InterfaceStats{
        {
            Name:      "wlp4s0",
            RxBytes:   1,
            RxPackets: 2,
            RxErrors:  3,
            RxDropped: 4,
            TxBytes:   9,
            TxPackets: 10,
            TxErrors:  11,
            TxDropped: 12,
        },
        {
            Name:      "em1",
            RxBytes:   315849,
            RxPackets: 1172,
            RxErrors:  0,
            RxDropped: 0,
            TxBytes:   315850,
            TxPackets: 1173,
            TxErrors:  0,
            TxDropped: 0,
        },
    }

    if len(stats) != len(netdevstats) {
        t.Errorf("Expected 2 net stats, got %d", len(stats))
    }

    for i, v := range netdevstats {
        if v != stats[i] {
            t.Errorf("Expected %#v, got %#v", v, stats[i])
        }
    }
}

func TestScanUDPStats(t *testing.T) {
    udpStatsFile := "testdata/procnetudp"
    r, err := os.Open(udpStatsFile)
    if err != nil {
        t.Errorf("failure opening %s: %v", udpStatsFile, err)
    }

    stats, err := scanUdpStats(r)
    if err != nil {
        t.Error(err)
    }

    var udpstats = info.UdpStat{
        Listen:   2,
        Dropped:  4,
        RxQueued: 10,
        TxQueued: 11,
    }

    if stats != udpstats {
        t.Errorf("Expected %#v, got %#v", udpstats, stats)
    }
}

// https://github.com/docker/libcontainer/blob/v2.2.1/cgroups/fs/cpuacct.go#L19
const nanosecondsInSeconds = 1000000000

var clockTicks = uint64(system.GetClockTicks())

func TestMorePossibleCPUs(t *testing.T) {
    realNumCPUs := uint32(8)
    numCpusFunc = func() (uint32, error) {
        return realNumCPUs, nil
    }
    possibleCPUs := uint32(31)

    perCpuUsage := make([]uint64, possibleCPUs)
    for i := uint32(0); i < realNumCPUs; i++ {
        perCpuUsage[i] = 8562955455524
    }

    s := &cgroups.Stats{
        CpuStats: cgroups.CpuStats{
            CpuUsage: cgroups.CpuUsage{
                PercpuUsage:       perCpuUsage,
                TotalUsage:        33802947350272,
                UsageInKernelmode: 734746 * nanosecondsInSeconds / clockTicks,
                UsageInUsermode:   2767637 * nanosecondsInSeconds / clockTicks,
            },
        },
    }
    var ret info.ContainerStats
    setCpuStats(s, &ret, true)

    expected := info.ContainerStats{
        Cpu: info.CpuStats{
            Usage: info.CpuUsage{
                PerCpu: perCpuUsage[0:realNumCPUs],
                User:   s.CpuStats.CpuUsage.UsageInUsermode,
                System: s.CpuStats.CpuUsage.UsageInKernelmode,
                Total:  33802947350272,
            },
        },
    }

    if !ret.Eq(&expected) {
        t.Fatalf("expected %+v == %+v", ret, expected)
    }
}
@ -15,29 +15,14 @@
|
|||||||
package libcontainer
|
package libcontainer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/cadvisor/container"
|
|
||||||
info "github.com/google/cadvisor/info/v1"
|
info "github.com/google/cadvisor/info/v1"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"github.com/opencontainers/runc/libcontainer"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
|
||||||
#include <unistd.h>
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
type CgroupSubsystems struct {
|
type CgroupSubsystems struct {
|
||||||
// Cgroup subsystem mounts.
|
// Cgroup subsystem mounts.
|
||||||
// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
|
// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
|
||||||
@ -104,309 +89,6 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
|
|||||||
"devices": {},
|
"devices": {},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get cgroup and networking stats of the specified container
|
|
||||||
func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) (*info.ContainerStats, error) {
|
|
||||||
cgroupStats, err := cgroupManager.GetStats()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
libcontainerStats := &libcontainer.Stats{
|
|
||||||
CgroupStats: cgroupStats,
|
|
||||||
}
|
|
||||||
withPerCPU := !ignoreMetrics.Has(container.PerCpuUsageMetrics)
|
|
||||||
stats := newContainerStats(libcontainerStats, withPerCPU)
|
|
||||||
|
|
||||||
// If we know the pid then get network stats from /proc/<pid>/net/dev
|
|
||||||
if pid == 0 {
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
if !ignoreMetrics.Has(container.NetworkUsageMetrics) {
|
|
||||||
netStats, err := networkStatsFromProc(rootFs, pid)
|
|
||||||
if err != nil {
|
|
||||||
glog.V(4).Infof("Unable to get network stats from pid %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !ignoreMetrics.Has(container.NetworkTcpUsageMetrics) {
|
|
||||||
t, err := tcpStatsFromProc(rootFs, pid, "net/tcp")
|
|
||||||
if err != nil {
|
|
||||||
glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
stats.Network.Tcp = t
|
|
||||||
}
|
|
||||||
|
|
||||||
t6, err := tcpStatsFromProc(rootFs, pid, "net/tcp6")
|
|
||||||
if err != nil {
|
|
||||||
glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
stats.Network.Tcp6 = t6
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
|
|
||||||
u, err := udpStatsFromProc(rootFs, pid, "net/udp")
|
|
||||||
if err != nil {
|
|
||||||
glog.V(4).Infof("Unable to get udp stats from pid %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
stats.Network.Udp = u
|
|
||||||
}
|
|
||||||
|
|
||||||
u6, err := udpStatsFromProc(rootFs, pid, "net/udp6")
|
|
||||||
if err != nil {
|
|
||||||
glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
stats.Network.Udp6 = u6
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For backwards compatibility.
|
|
||||||
if len(stats.Network.Interfaces) > 0 {
|
|
||||||
stats.Network.InterfaceStats = stats.Network.Interfaces[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
|
|
||||||
netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")
|
|
||||||
|
|
||||||
ifaceStats, err := scanInterfaceStats(netStatsFile)
|
|
||||||
if err != nil {
|
|
||||||
return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ifaceStats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
|
|
||||||
)
|
|
||||||
|
|
||||||
func isIgnoredDevice(ifName string) bool {
|
|
||||||
for _, prefix := range ignoredDevicePrefixes {
|
|
||||||
if strings.HasPrefix(strings.ToLower(ifName), prefix) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
|
|
||||||
file, err := os.Open(netStatsFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
|
|
||||||
// Discard header lines
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
if b := scanner.Scan(); !b {
|
|
||||||
return nil, scanner.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := []info.InterfaceStats{}
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
line = strings.Replace(line, ":", "", -1)
|
|
||||||
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
// If the format of the line is invalid then don't trust any of the stats
|
|
||||||
// in this file.
|
|
||||||
if len(fields) != 17 {
|
|
||||||
return nil, fmt.Errorf("invalid interface stats line: %v", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
devName := fields[0]
|
|
||||||
if isIgnoredDevice(devName) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
i := info.InterfaceStats{
|
|
||||||
Name: devName,
|
|
||||||
}
|
|
||||||
|
|
||||||
statFields := append(fields[1:5], fields[9:13]...)
|
|
||||||
statPointers := []*uint64{
|
|
||||||
&i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
|
|
||||||
&i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := setInterfaceStatValues(statFields, statPointers)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
|
|
||||||
}
|
|
||||||
|
|
||||||
stats = append(stats, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setInterfaceStatValues(fields []string, pointers []*uint64) error {
|
|
||||||
for i, v := range fields {
|
|
||||||
val, err := strconv.ParseUint(v, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*pointers[i] = val
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
|
|
||||||
tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
|
|
||||||
|
|
||||||
tcpStats, err := scanTcpStats(tcpStatsFile)
|
|
||||||
if err != nil {
|
|
||||||
return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return tcpStats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
	var stats info.TcpStat

	data, err := ioutil.ReadFile(tcpStatsFile)
	if err != nil {
		return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
	}

	tcpStateMap := map[string]uint64{
		"01": 0, //ESTABLISHED
		"02": 0, //SYN_SENT
		"03": 0, //SYN_RECV
		"04": 0, //FIN_WAIT1
		"05": 0, //FIN_WAIT2
		"06": 0, //TIME_WAIT
		"07": 0, //CLOSE
		"08": 0, //CLOSE_WAIT
		"09": 0, //LAST_ACK
		"0A": 0, //LISTEN
		"0B": 0, //CLOSING
	}

	reader := strings.NewReader(string(data))
	scanner := bufio.NewScanner(reader)
	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	for scanner.Scan() {
		line := scanner.Text()

		state := strings.Fields(line)
		// TCP state is the 4th field.
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
		tcpState := state[3]
		_, ok := tcpStateMap[tcpState]
		if !ok {
			return stats, fmt.Errorf("invalid TCP stats line: %v", line)
		}
		tcpStateMap[tcpState]++
	}

	stats = info.TcpStat{
		Established: tcpStateMap["01"],
		SynSent:     tcpStateMap["02"],
		SynRecv:     tcpStateMap["03"],
		FinWait1:    tcpStateMap["04"],
		FinWait2:    tcpStateMap["05"],
		TimeWait:    tcpStateMap["06"],
		Close:       tcpStateMap["07"],
		CloseWait:   tcpStateMap["08"],
		LastAck:     tcpStateMap["09"],
		Listen:      tcpStateMap["0A"],
		Closing:     tcpStateMap["0B"],
	}

	return stats, nil
}

func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
	var err error
	var udpStats info.UdpStat

	udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

	r, err := os.Open(udpStatsFile)
	if err != nil {
		return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
	}

	udpStats, err = scanUdpStats(r)
	if err != nil {
		return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
	}

	return udpStats, nil
}

func scanUdpStats(r io.Reader) (info.UdpStat, error) {
	var stats info.UdpStat

	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	listening := uint64(0)
	dropped := uint64(0)
	rxQueued := uint64(0)
	txQueued := uint64(0)

	for scanner.Scan() {
		line := scanner.Text()
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops

		listening++

		fs := strings.Fields(line)
		if len(fs) != 13 {
			continue
		}

		// fs[4] holds the hex "tx_queue:rx_queue" pair; fs[12] is the drop count.
		rx, tx := uint64(0), uint64(0)
		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
		rxQueued += rx
		txQueued += tx

		d, err := strconv.Atoi(string(fs[12]))
		if err != nil {
			continue
		}
		dropped += uint64(d)
	}

	stats = info.UdpStat{
		Listen:   listening,
		Dropped:  dropped,
		RxQueued: rxQueued,
		TxQueued: txQueued,
	}

	return stats, nil
}

func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
	pids, err := cgroupManager.GetPids()
	if err != nil {
		return nil, err
	}
	return pids, nil
}

func DiskStatsCopy0(major, minor uint64) *info.PerDiskStats {
	disk := info.PerDiskStats{
		Major: major,
@ -456,152 +138,3 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
	}
	return DiskStatsCopy1(disk_stat)
}

func minUint32(x, y uint32) uint32 {
	if x < y {
		return x
	}
	return y
}

// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs

// Convert libcontainer stats to info.ContainerStats.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
	ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
	ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
	ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
	ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime

	if !withPerCPU {
		return
	}
	if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
		// libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
		// cpuacct subsystem.
		return
	}

	numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
	// Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
	// the percpu usage information includes extra zero values for all additional
	// possible CPUs. This is to allow statistic collection after CPU-hotplug.
	// We intentionally ignore these extra zeroes.
	numActual, err := numCpusFunc()
	if err != nil {
		glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
		numActual = numPossible
	}
	if numActual > numPossible {
		// The real number of cores should never be greater than the number of
		// datapoints reported in cpu usage.
		glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
	}
	numActual = minUint32(numPossible, numActual)
	ret.Cpu.Usage.PerCpu = make([]uint64, numActual)

	for i := uint32(0); i < numActual; i++ {
		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
	}
}

// Copied from
// https://github.com/moby/moby/blob/8b1adf55c2af329a4334f21d9444d6a169000c81/daemon/stats/collector_unix.go#L73
// Apache 2.0, Copyright Docker, Inc.
func getNumberOnlineCPUs() (uint32, error) {
	i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
	// According to POSIX - errno is undefined after successful
	// sysconf, and can be non-zero in several cases, so look for
	// error in returned value not in errno.
	// (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
	if i == -1 {
		return 0, err
	}
	return uint32(i), nil
}

func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
	ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
	ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
	ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
	ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
	ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
	ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
	ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
}

func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.Memory.Usage = s.MemoryStats.Usage.Usage
	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
	ret.Memory.Cache = s.MemoryStats.Stats["cache"]

	if s.MemoryStats.UseHierarchy {
		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
	} else {
		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
	}
	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
		ret.Memory.ContainerData.Pgfault = v
		ret.Memory.HierarchicalData.Pgfault = v
	}
	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
		ret.Memory.ContainerData.Pgmajfault = v
		ret.Memory.HierarchicalData.Pgmajfault = v
	}

	// Working set is usage minus inactive file pages, clamped at zero
	// (e.g. 100 MiB of usage with 30 MiB of total_inactive_file gives a
	// working set of 70 MiB).
	workingSet := ret.Memory.Usage
	if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
		if workingSet < v {
			workingSet = 0
		} else {
			workingSet -= v
		}
	}
	ret.Memory.WorkingSet = workingSet
}

func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
	ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
	for i := range libcontainerStats.Interfaces {
		ret.Network.Interfaces[i] = info.InterfaceStats{
			Name:      libcontainerStats.Interfaces[i].Name,
			RxBytes:   libcontainerStats.Interfaces[i].RxBytes,
			RxPackets: libcontainerStats.Interfaces[i].RxPackets,
			RxErrors:  libcontainerStats.Interfaces[i].RxErrors,
			RxDropped: libcontainerStats.Interfaces[i].RxDropped,
			TxBytes:   libcontainerStats.Interfaces[i].TxBytes,
			TxPackets: libcontainerStats.Interfaces[i].TxPackets,
			TxErrors:  libcontainerStats.Interfaces[i].TxErrors,
			TxDropped: libcontainerStats.Interfaces[i].TxDropped,
		}
	}

	// Add to base struct for backwards compatibility.
	if len(ret.Network.Interfaces) > 0 {
		ret.Network.InterfaceStats = ret.Network.Interfaces[0]
	}
}

func newContainerStats(libcontainerStats *libcontainer.Stats, withPerCPU bool) *info.ContainerStats {
	ret := &info.ContainerStats{
		Timestamp: time.Now(),
	}

	if s := libcontainerStats.CgroupStats; s != nil {
		setCpuStats(s, ret, withPerCPU)
		setDiskIoStats(s, ret)
		setMemoryStats(s, ret)
	}
	if len(libcontainerStats.Interfaces) > 0 {
		setNetworkStats(libcontainerStats, ret)
	}
	return ret
}

@ -16,130 +16,15 @@ package libcontainer
import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"testing"

	info "github.com/google/cadvisor/info/v1"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/system"
)

func TestScanInterfaceStats(t *testing.T) {
	stats, err := scanInterfaceStats("testdata/procnetdev")
	if err != nil {
		t.Error(err)
	}

	var netdevstats = []info.InterfaceStats{
		{
			Name:      "wlp4s0",
			RxBytes:   1,
			RxPackets: 2,
			RxErrors:  3,
			RxDropped: 4,
			TxBytes:   9,
			TxPackets: 10,
			TxErrors:  11,
			TxDropped: 12,
		},
		{
			Name:      "em1",
			RxBytes:   315849,
			RxPackets: 1172,
			RxErrors:  0,
			RxDropped: 0,
			TxBytes:   315850,
			TxPackets: 1173,
			TxErrors:  0,
			TxDropped: 0,
		},
	}

	if len(stats) != len(netdevstats) {
		t.Errorf("Expected 2 net stats, got %d", len(stats))
	}

	for i, v := range netdevstats {
		if v != stats[i] {
			t.Errorf("Expected %#v, got %#v", v, stats[i])
		}
	}
}

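The testdata/procnetdev fixture read by this test is not included in the diff. For illustration only, a hedged sketch of a /proc/net/dev file that would produce the expected values above (two header lines, then one row per interface; columns 1-4 are the RX bytes/packets/errs/drop counters and columns 9-12 the TX counters). The constant name is illustrative, not taken from the repository:

// Not part of this commit: an illustrative /proc/net/dev-style fixture.
const procNetDevExample = `Inter-|   Receive                                                |  Transmit
 face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
wlp4s0: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
   em1: 315849 1172 0 0 0 0 0 0 315850 1173 0 0 0 0 0 0`
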
func TestScanUDPStats(t *testing.T) {
	udpStatsFile := "testdata/procnetudp"
	r, err := os.Open(udpStatsFile)
	if err != nil {
		t.Errorf("failure opening %s: %v", udpStatsFile, err)
	}

	stats, err := scanUdpStats(r)
	if err != nil {
		t.Error(err)
	}

	var udpstats = info.UdpStat{
		Listen:   2,
		Dropped:  4,
		RxQueued: 10,
		TxQueued: 11,
	}

	if stats != udpstats {
		t.Errorf("Expected %#v, got %#v", udpstats, stats)
	}
}

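The interface and UDP scanners are exercised above, but the TCP path is not tested in this excerpt. A minimal sketch of how scanTcpStats could be covered, assuming it lives in this package and an io/ioutil import is acceptable; the fixture lines mimic /proc/net/tcp entries in the LISTEN ("0A") and ESTABLISHED ("01") states:

// Sketch only; not part of this commit.
func TestScanTcpStatsSketch(t *testing.T) {
	// One socket in LISTEN (st == 0A), one in ESTABLISHED (st == 01).
	fixture := "  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode\n" +
		"   0: 0100007F:0050 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 11111 1 0000000000000000 100 0 0 10 0\n" +
		"   1: 0100007F:0050 0100007F:A1B2 01 00000000:00000000 00:00000000 00000000     0        0 22222 1 0000000000000000 100 0 0 10 0\n"

	f, err := ioutil.TempFile("", "procnettcp")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(fixture); err != nil {
		t.Fatal(err)
	}
	f.Close()

	stats, err := scanTcpStats(f.Name())
	if err != nil {
		t.Fatal(err)
	}
	if stats.Listen != 1 || stats.Established != 1 {
		t.Errorf("unexpected TCP stats: %+v", stats)
	}
}
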
// https://github.com/docker/libcontainer/blob/v2.2.1/cgroups/fs/cpuacct.go#L19
const nanosecondsInSeconds = 1000000000

var clockTicks = uint64(system.GetClockTicks())

func TestMorePossibleCPUs(t *testing.T) {
	realNumCPUs := uint32(8)
	numCpusFunc = func() (uint32, error) {
		return realNumCPUs, nil
	}
	possibleCPUs := uint32(31)

	perCpuUsage := make([]uint64, possibleCPUs)
	for i := uint32(0); i < realNumCPUs; i++ {
		perCpuUsage[i] = 8562955455524
	}

	s := &cgroups.Stats{
		CpuStats: cgroups.CpuStats{
			CpuUsage: cgroups.CpuUsage{
				PercpuUsage:       perCpuUsage,
				TotalUsage:        33802947350272,
				UsageInKernelmode: 734746 * nanosecondsInSeconds / clockTicks,
				UsageInUsermode:   2767637 * nanosecondsInSeconds / clockTicks,
			},
		},
	}
	var ret info.ContainerStats
	setCpuStats(s, &ret, true)

	expected := info.ContainerStats{
		Cpu: info.CpuStats{
			Usage: info.CpuUsage{
				PerCpu: perCpuUsage[0:realNumCPUs],
				User:   s.CpuStats.CpuUsage.UsageInUsermode,
				System: s.CpuStats.CpuUsage.UsageInKernelmode,
				Total:  33802947350272,
			},
		},
	}

	if !ret.Eq(&expected) {
		t.Fatalf("expected %+v == %+v", ret, expected)
	}
}

var defaultCgroupSubsystems = []string{
	"systemd", "freezer", "memory", "blkio", "hugetlb", "net_cls,net_prio", "pids", "cpu,cpuacct", "devices", "cpuset", "perf_events",
}

@ -26,7 +26,6 @@ import (
	"github.com/google/cadvisor/machine"

	"github.com/golang/glog"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
	"github.com/opencontainers/runc/libcontainer/configs"
)
@ -34,25 +33,16 @@ import (
type rawContainerHandler struct {
	// Name of the container for this handler.
	name string
	cgroupSubsystems *libcontainer.CgroupSubsystems
	machineInfoFactory info.MachineInfoFactory

	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string

	// Manager of this container's cgroups.
	cgroupManager cgroups.Manager

	fsInfo fs.FsInfo
	externalMounts []common.Mount

	rootFs string
	libcontainerHandler *libcontainer.Handler

	// Metrics to be ignored.
	ignoreMetrics container.MetricSet

	pid int
}

func isRootCgroup(name string) bool {
@ -88,17 +78,15 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
		pid = 1
	}

	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)

	return &rawContainerHandler{
		name: name,
		cgroupSubsystems: cgroupSubsystems,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths: cgroupPaths,
		cgroupManager: cgroupManager,
		fsInfo: fsInfo,
		externalMounts: externalMounts,
		rootFs: rootFs,
		libcontainerHandler: handler,
		ignoreMetrics: ignoreMetrics,
		pid: pid,
	}, nil
}

@ -231,7 +219,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	}

func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
	stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics)
	stats, err := self.libcontainerHandler.GetStats()
	if err != nil {
		return stats, err
	}
@ -267,7 +255,7 @@ func (self *rawContainerHandler) ListContainers(listType container.ListType) ([]
	}

func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return libcontainer.GetProcesses(self.cgroupManager)
	return self.libcontainerHandler.GetProcesses()
}

func (self *rawContainerHandler) Exists() bool {
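The new libcontainer.Handler type itself lives in container/libcontainer/handler.go and is not shown in this excerpt. From the call sites above one can infer roughly the following shape; only the NewHandler arguments and the GetStats/GetProcesses methods are taken from the diff, the field names and the GetProcesses body are illustrative:

// Illustrative sketch only; not copied from the diff.
type Handler struct {
	cgroupManager cgroups.Manager
	rootFs        string
	pid           int
	ignoreMetrics container.MetricSet
}

func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler {
	return &Handler{
		cgroupManager: cgroupManager,
		rootFs:        rootFs,
		pid:           pid,
		ignoreMetrics: ignoreMetrics,
	}
}

// GetProcesses (like GetStats) wraps the package-level helpers the raw and
// rkt handlers previously called directly.
func (h *Handler) GetProcesses() ([]int, error) {
	return h.cgroupManager.GetPids()
}
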
@ -28,48 +28,35 @@ import (
	"golang.org/x/net/context"

	"github.com/golang/glog"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type rktContainerHandler struct {
	rktClient rktapi.PublicAPIClient
	// Name of the container for this handler.
	name string
	cgroupSubsystems *libcontainer.CgroupSubsystems
	machineInfoFactory info.MachineInfoFactory

	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string

	// Manager of this container's cgroups.
	cgroupManager cgroups.Manager

	// Whether this container has network isolation enabled.
	hasNetwork bool

	fsInfo fs.FsInfo

	rootFs string

	isPod bool

	aliases []string

	pid int

	rootfsStorageDir string

	labels map[string]string

	// Filesystem handler.
	fsHandler common.FsHandler

	ignoreMetrics container.MetricSet

	apiPod *rktapi.Pod

	labels map[string]string

	reference info.ContainerReference

	libcontainerHandler *libcontainer.Handler
}

func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) {
@ -122,30 +109,27 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
		Paths: cgroupPaths,
	}

	hasNetwork := false
	libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics)
	if isPod {
		hasNetwork = true
	}

	rootfsStorageDir := getRootFs(rktPath, parsed)

	containerReference := info.ContainerReference{
		Name: name,
		Aliases: aliases,
		Namespace: RktNamespace,
	}

	handler := &rktContainerHandler{
		name: name,
		rktClient: rktClient,
		cgroupSubsystems: cgroupSubsystems,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths: cgroupPaths,
		cgroupManager: cgroupManager,
		fsInfo: fsInfo,
		hasNetwork: hasNetwork,
		rootFs: rootFs,
		isPod: isPod,
		aliases: aliases,
		pid: pid,
		labels: labels,
		rootfsStorageDir: rootfsStorageDir,
		ignoreMetrics: ignoreMetrics,
		apiPod: apiPod,
		labels: labels,
		reference: containerReference,
		libcontainerHandler: libcontainerHandler,
	}

	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
@ -174,11 +158,7 @@ func createLabels(annotations []*rktapi.KeyValue) map[string]string {
	}

func (handler *rktContainerHandler) ContainerReference() (info.ContainerReference, error) {
	return info.ContainerReference{
	return handler.reference, nil
		Name: handler.name,
		Aliases: handler.aliases,
		Namespace: RktNamespace,
	}, nil
}

func (handler *rktContainerHandler) Start() {
@ -190,7 +170,7 @@ func (handler *rktContainerHandler) Cleanup() {
	}

func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
	hasNetwork := handler.hasNetwork && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
	hasNetwork := handler.isPod && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics)
	hasFilesystem := !handler.ignoreMetrics.Has(container.DiskUsageMetrics)

	spec, err := common.GetSpec(handler.cgroupPaths, handler.machineInfoFactory, hasNetwork, hasFilesystem)
@ -242,7 +222,7 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
	}

func (handler *rktContainerHandler) GetStats() (*info.ContainerStats, error) {
	stats, err := libcontainer.GetStats(handler.cgroupManager, handler.rootFs, handler.pid, handler.ignoreMetrics)
	stats, err := handler.libcontainerHandler.GetStats()
	if err != nil {
		return stats, err
	}
@ -274,7 +254,7 @@ func (self *rktContainerHandler) GetContainerIPAddress() string {
func (handler *rktContainerHandler) GetCgroupPath(resource string) (string, error) {
	path, ok := handler.cgroupPaths[resource]
	if !ok {
		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, handler.name)
		return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, handler.reference.Name)
	}
	return path, nil
}
@ -284,11 +264,11 @@ func (handler *rktContainerHandler) GetContainerLabels() map[string]string {
	}

func (handler *rktContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
	return common.ListContainers(handler.name, handler.cgroupPaths, listType)
	return common.ListContainers(handler.reference.Name, handler.cgroupPaths, listType)
}

func (handler *rktContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return libcontainer.GetProcesses(handler.cgroupManager)
	return handler.libcontainerHandler.GetProcesses()
}

func (handler *rktContainerHandler) Exists() bool {