Migrating cAdvisor to new libcontainer.

Backwards compatibility is maintained with older versions of libcontainer.

parent e97e203d76
commit 64c0d3d8c3
@@ -41,6 +41,13 @@ var DockerNamespace = "docker"

// Basepath to all container specific information that libcontainer stores.
var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)")
var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)")

// TODO(vmarmol): Export run dir too for newer Dockers.
// Directory holding Docker container state information.
func DockerStateDir() string {
    return libcontainer.DockerStateDir(*dockerRootDir)
}

// Whether the system is using Systemd.
var useSystemd bool
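Not part of the commit: a minimal sketch of how the two Docker-location flags introduced above are defined and consumed. The state-dir layout (`<docker_root>/containers`) is taken from the `DockerStateDir` helper added later in this diff; the `main` wrapper and printed labels are purely illustrative.

```go
package main

import (
    "flag"
    "fmt"
    "path"
)

// Hypothetical stand-ins for the flags introduced in this hunk.
var (
    dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory")
    dockerRunDir  = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory")
)

func main() {
    flag.Parse()
    // Older Dockers keep per-container libcontainer state under the root dir;
    // newer Dockers also use the run dir, hence the TODO about exporting it.
    fmt.Println("state dir:", path.Join(*dockerRootDir, "containers"))
    fmt.Println("run dir:", *dockerRunDir)
}
```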
@@ -97,7 +104,6 @@ func (self *dockerFactory) NewContainerHandler(name string) (handler container.C
        name,
        self.machineInfoFactory,
        self.fsInfo,
        *dockerRootDir,
        self.usesAufsDriver,
        &self.cgroupSubsystems,
    )
@@ -16,18 +16,15 @@
package docker

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "math"
    "os"
    "path"
    "strings"
    "time"

    "github.com/docker/libcontainer"
    "github.com/docker/libcontainer/cgroups"
    cgroup_fs "github.com/docker/libcontainer/cgroups/fs"
    libcontainerConfigs "github.com/docker/libcontainer/configs"
    "github.com/fsouza/go-dockerclient"
    "github.com/google/cadvisor/container"
    containerLibcontainer "github.com/google/cadvisor/container/libcontainer"
@@ -36,9 +33,6 @@ import (
    "github.com/google/cadvisor/utils"
)

// Relative path from Docker root to the libcontainer per-container state.
const pathToLibcontainerState = "execdriver/native"

// Path to aufs dir where all the files exist.
// aufs/layers is ignored here since it does not hold a lot of data.
// aufs/mnt contains the mount points used to compose the rootfs. Hence it is also ignored.
@@ -65,7 +59,9 @@ type dockerContainerHandler struct {
    // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
    cgroupPaths map[string]string

    cgroup cgroups.Cgroup
    // Manager of this container's cgroups.
    cgroupManager cgroups.Manager

    usesAufsDriver bool
    fsInfo fs.FsInfo
    storageDirs []string
@@ -74,16 +70,11 @@ type dockerContainerHandler struct {
    creationTime time.Time
}

func DockerStateDir() string {
    return path.Join(*dockerRootDir, pathToLibcontainerState)
}

func newDockerContainerHandler(
    client *docker.Client,
    name string,
    machineInfoFactory info.MachineInfoFactory,
    fsInfo fs.FsInfo,
    dockerRootDir string,
    usesAufsDriver bool,
    cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
) (container.ContainerHandler, error) {
@@ -93,25 +84,26 @@ func newDockerContainerHandler(
        cgroupPaths[key] = path.Join(val, name)
    }

    id := ContainerNameToDockerId(name)
    stateDir := DockerStateDir()
    handler := &dockerContainerHandler{
        id: id,
        client: client,
        name: name,
        machineInfoFactory: machineInfoFactory,
        libcontainerConfigPath: path.Join(stateDir, id, "container.json"),
        libcontainerStatePath: path.Join(stateDir, id, "state.json"),
        libcontainerPidPath: path.Join(stateDir, id, "pid"),
        cgroupPaths: cgroupPaths,
        cgroup: cgroups.Cgroup{
            Parent: "/",
            Name: name,
    // Generate the equivalent cgroup manager for this container.
    cgroupManager := &cgroup_fs.Manager{
        Cgroups: &libcontainerConfigs.Cgroup{
            Name: name,
        },
        usesAufsDriver: usesAufsDriver,
        fsInfo: fsInfo,
        Paths: cgroupPaths,
    }
    handler.storageDirs = append(handler.storageDirs, path.Join(dockerRootDir, pathToAufsDir, id))

    id := ContainerNameToDockerId(name)
    handler := &dockerContainerHandler{
        id: id,
        client: client,
        name: name,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths: cgroupPaths,
        cgroupManager: cgroupManager,
        usesAufsDriver: usesAufsDriver,
        fsInfo: fsInfo,
    }
    handler.storageDirs = append(handler.storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))

    // We assume that if Inspect fails then the container is not known to docker.
    ctnr, err := client.InspectContainer(id)
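For clarity, the manager construction in the hunk above can be read in isolation as the sketch below: an fs-backed `cgroups.Manager` is pointed at cgroup paths that already exist via the `Paths` field, so cAdvisor only reads stats and never creates cgroups. Import paths follow the libcontainer version vendored by this commit; the wrapper function name is invented.

```go
package example

import (
    "github.com/docker/libcontainer/cgroups"
    cgroup_fs "github.com/docker/libcontainer/cgroups/fs"
    libcontainerConfigs "github.com/docker/libcontainer/configs"
)

// newReadOnlyCgroupManager builds an fs-backed manager over pre-resolved
// per-subsystem cgroup paths (e.g. "memory" -> "/sys/fs/cgroup/memory/docker/<id>").
func newReadOnlyCgroupManager(name string, cgroupPaths map[string]string) cgroups.Manager {
    return &cgroup_fs.Manager{
        Cgroups: &libcontainerConfigs.Cgroup{
            Name: name, // cgroup name mirrors the container name
        },
        // Supplying Paths makes the manager operate on existing cgroups
        // instead of deriving (or creating) its own.
        Paths: cgroupPaths,
    }
}
```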
@@ -135,76 +127,23 @@ func (self *dockerContainerHandler) ContainerReference() (info.ContainerReferenc
    }, nil
}

// TODO(vmarmol): Switch to getting this from libcontainer once we have a solid API.
func (self *dockerContainerHandler) readLibcontainerConfig() (*libcontainer.Config, error) {
    out, err := ioutil.ReadFile(self.libcontainerConfigPath)
func (self *dockerContainerHandler) readLibcontainerConfig() (*libcontainerConfigs.Config, error) {
    config, err := containerLibcontainer.ReadConfig(*dockerRootDir, *dockerRunDir, self.id)
    if err != nil {
        return nil, fmt.Errorf("failed to read libcontainer config from %q: %v", self.libcontainerConfigPath, err)
    }
    var config libcontainer.Config
    err = json.Unmarshal(out, &config)
    if err != nil {
        // TODO(vmarmol): Remove this once it becomes the standard.
        // Try to parse the old config. The main difference is that namespaces used to be a map, now it is a slice of structs.
        // The JSON marshaler will use the non-nested field before the nested one.
        type oldLibcontainerConfig struct {
            libcontainer.Config
            OldNamespaces map[string]bool `json:"namespaces,omitempty"`
        }
        var oldConfig oldLibcontainerConfig
        err2 := json.Unmarshal(out, &oldConfig)
        if err2 != nil {
            // Use original error.
            return nil, fmt.Errorf("failed to parse libcontainer config at %q: %v", self.libcontainerConfigPath, err)
        }

        // Translate the old config into the new config.
        config = oldConfig.Config
        for ns := range oldConfig.OldNamespaces {
            config.Namespaces = append(config.Namespaces, libcontainer.Namespace{
                Type: libcontainer.NamespaceType(ns),
            })
        }
        return nil, fmt.Errorf("failed to read libcontainer config: %v", err)
    }

    // Replace cgroup parent and name with our own since we may be running in a different context.
    config.Cgroups.Name = self.cgroup.Name
    config.Cgroups.Parent = self.cgroup.Parent
    if config.Cgroups == nil {
        config.Cgroups = new(libcontainerConfigs.Cgroup)
    }
    config.Cgroups.Name = self.name
    config.Cgroups.Parent = "/"

    return &config, nil
    return config, nil
}

func (self *dockerContainerHandler) readLibcontainerState() (state *libcontainer.State, err error) {
    // TODO(vmarmol): Remove this once we can depend on a newer Docker.
    // Libcontainer changed how its state was stored, try the old way of a "pid" file
    if !utils.FileExists(self.libcontainerStatePath) {
        if utils.FileExists(self.libcontainerPidPath) {
            // We don't need the old state, return an empty state and we'll gracefully degrade.
            return &libcontainer.State{}, nil
        }
    }
    f, err := os.Open(self.libcontainerStatePath)
    if err != nil {
        return nil, fmt.Errorf("failed to open %s - %s\n", self.libcontainerStatePath, err)
    }
    defer f.Close()
    d := json.NewDecoder(f)
    retState := new(libcontainer.State)
    err = d.Decode(retState)
    if err != nil {
        return nil, fmt.Errorf("failed to parse libcontainer state at %q: %v", self.libcontainerStatePath, err)
    }
    state = retState

    // Create cgroup paths if they don't exist. This is since older Docker clients don't write it.
    if len(state.CgroupPaths) == 0 {
        state.CgroupPaths = self.cgroupPaths
    }

    return
}

func libcontainerConfigToContainerSpec(config *libcontainer.Config, mi *info.MachineInfo) info.ContainerSpec {
func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *info.MachineInfo) info.ContainerSpec {
    var spec info.ContainerSpec
    spec.HasMemory = true
    spec.Memory.Limit = math.MaxUint64
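The cgroup override performed after `ReadConfig` above, factored out for readability; it mirrors the diff exactly, with only the wrapper function name invented. cAdvisor may observe a container from a different cgroup context than the daemon that wrote the config, so the recorded cgroup name and parent are replaced with the handler's own view.

```go
package example

import libcontainerConfigs "github.com/docker/libcontainer/configs"

// rewriteCgroups replaces the cgroup name and parent recorded in a container's
// libcontainer config with the values this handler uses.
func rewriteCgroups(config *libcontainerConfigs.Config, containerName string) {
    if config.Cgroups == nil {
        config.Cgroups = new(libcontainerConfigs.Cgroup)
    }
    config.Cgroups.Name = containerName
    config.Cgroups.Parent = "/"
}
```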
@@ -292,16 +231,31 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
    return nil
}

func (self *dockerContainerHandler) GetStats() (stats *info.ContainerStats, err error) {
    state, err := self.readLibcontainerState()
// TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
    config, err := self.readLibcontainerConfig()
    if err != nil {
        return nil, err
    }

    stats, err = containerLibcontainer.GetStats(self.cgroupPaths, state)
    var networkInterfaces []string
    if len(config.Networks) > 0 {
        // ContainerStats only reports stat for one network device.
        // TODO(vmarmol): Handle multiple physical network devices.
        for _, n := range config.Networks {
            // Take the first non-loopback.
            if n.Type != "loopback" {
                networkInterfaces = []string{n.HostInterfaceName}
                break
            }
        }
    }
    stats, err := containerLibcontainer.GetStats(self.cgroupManager, networkInterfaces)
    if err != nil {
        return stats, err
    }

    // Get filesystem stats.
    err = self.getFsStats(stats)
    if err != nil {
        return stats, err
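The interface-selection loop in the new `GetStats` boils down to the sketch below. Field names (`Type`, `HostInterfaceName`) are taken from the diff; the slice element type is an assumption about the vendored `configs` package, and the helper name is invented.

```go
package example

import libcontainerConfigs "github.com/docker/libcontainer/configs"

// firstNonLoopback picks the host-side interface of the first non-loopback
// network, since ContainerStats currently reports a single network device.
func firstNonLoopback(networks []*libcontainerConfigs.Network) []string {
    for _, n := range networks {
        if n.Type != "loopback" {
            return []string{n.HostInterfaceName}
        }
    }
    return nil
}
```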
@@ -348,11 +302,13 @@ func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, erro
}

func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]int, error) {
    // TODO(vmarmol): Implement.
    return nil, nil
}

func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
    return cgroup_fs.GetPids(&self.cgroup)
    // TODO(vmarmol): Implement.
    return nil, nil
}

func (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error {
@@ -365,6 +321,5 @@ func (self *dockerContainerHandler) StopWatchingSubcontainers() error {
}

func (self *dockerContainerHandler) Exists() bool {
    // We consider the container existing if both libcontainer config and state files exist.
    return utils.FileExists(self.libcontainerConfigPath) && utils.FileExists(self.libcontainerStatePath)
    return containerLibcontainer.Exists(*dockerRootDir, *dockerRunDir, self.id)
}
@@ -16,13 +16,16 @@ package libcontainer

import (
    "fmt"
    "io/ioutil"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/docker/libcontainer"
    "github.com/docker/libcontainer/cgroups"
    cgroupfs "github.com/docker/libcontainer/cgroups/fs"
    "github.com/docker/libcontainer/network"
    info "github.com/google/cadvisor/info/v1"
    "github.com/google/cadvisor/utils/sysinfo"
)

type CgroupSubsystems struct {
@@ -73,23 +76,30 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
    "blkio": {},
}

// Get stats of the specified container
func GetStats(cgroupPaths map[string]string, state *libcontainer.State) (*info.ContainerStats, error) {
    // TODO(vmarmol): Use libcontainer's Stats() in the new API when that is ready.
    stats := &libcontainer.ContainerStats{}

    var err error
    stats.CgroupStats, err = cgroupfs.GetStats(cgroupPaths)
// Get cgroup and networking stats of the specified container
func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.ContainerStats, error) {
    cgroupStats, err := cgroupManager.GetStats()
    if err != nil {
        return &info.ContainerStats{}, err
        return nil, err
    }

    stats.NetworkStats, err = network.GetStats(&state.NetworkState)
    if err != nil {
        return &info.ContainerStats{}, err
    libcontainerStats := &libcontainer.Stats{
        CgroupStats: cgroupStats,
    }
    stats := toContainerStats(libcontainerStats)

    return toContainerStats(stats), nil
    if len(networkInterfaces) != 0 {
        // ContainerStats only reports stat for one network device.
        // TODO(rjnagal): Handle multiple physical network devices.
        stats.Network, err = sysinfo.GetNetworkStats(networkInterfaces[0])
        if err != nil {
            return stats, err
        }
    }
    return stats, nil
}

func DockerStateDir(dockerRoot string) string {
    return path.Join(dockerRoot, "containers")
}

func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
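A hedged usage sketch of the reworked helper above: cgroup stats come from the supplied manager, network stats from the named host interfaces. The calling function is illustrative and not part of the commit.

```go
package example

import (
    "fmt"

    "github.com/docker/libcontainer/cgroups"

    containerLibcontainer "github.com/google/cadvisor/container/libcontainer"
)

// sampleOnce gathers one stats sample for a container whose cgroups are driven
// by manager and whose traffic flows through the given host interfaces.
func sampleOnce(manager cgroups.Manager, ifaces []string) error {
    stats, err := containerLibcontainer.GetStats(manager, ifaces)
    if err != nil {
        return err
    }
    fmt.Println("sampled at:", stats.Timestamp)
    return nil
}
```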
@@ -134,7 +144,7 @@ func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskSta
}

// Convert libcontainer stats to info.ContainerStats.
func toContainerStats(libcontainerStats *libcontainer.ContainerStats) *info.ContainerStats {
func toContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
    s := libcontainerStats.CgroupStats
    ret := new(info.ContainerStats)
    ret.Timestamp = time.Now()
@@ -176,10 +186,64 @@ func toContainerStats(libcontainerStats *libcontainer.ContainerStats) *info.Cont
            }
        }
    }
    // TODO(vishh): Perform a deep copy or alias libcontainer network stats.
    if libcontainerStats.NetworkStats != nil {
        ret.Network = *(*info.NetworkStats)(libcontainerStats.NetworkStats)
    if len(libcontainerStats.Interfaces) > 0 {
        // TODO(vmarmol): Handle multiple interfaces.
        ret.Network.RxBytes = libcontainerStats.Interfaces[0].RxBytes
        ret.Network.RxPackets = libcontainerStats.Interfaces[0].RxPackets
        ret.Network.RxErrors = libcontainerStats.Interfaces[0].RxErrors
        ret.Network.RxDropped = libcontainerStats.Interfaces[0].RxDropped
        ret.Network.TxBytes = libcontainerStats.Interfaces[0].TxBytes
        ret.Network.TxPackets = libcontainerStats.Interfaces[0].TxPackets
        ret.Network.TxErrors = libcontainerStats.Interfaces[0].TxErrors
        ret.Network.TxDropped = libcontainerStats.Interfaces[0].TxDropped
    }

    return ret
}

// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
func GetNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
    out := &libcontainer.NetworkInterface{
        Name: interfaceName,
    }
    // This can happen if the network runtime information is missing - possible if the
    // container was created by an old version of libcontainer.
    if interfaceName == "" {
        return out, nil
    }
    type netStatsPair struct {
        // Where to write the output.
        Out *uint64
        // The network stats file to read.
        File string
    }
    // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
    netStats := []netStatsPair{
        {Out: &out.RxBytes, File: "tx_bytes"},
        {Out: &out.RxPackets, File: "tx_packets"},
        {Out: &out.RxErrors, File: "tx_errors"},
        {Out: &out.RxDropped, File: "tx_dropped"},

        {Out: &out.TxBytes, File: "rx_bytes"},
        {Out: &out.TxPackets, File: "rx_packets"},
        {Out: &out.TxErrors, File: "rx_errors"},
        {Out: &out.TxDropped, File: "rx_dropped"},
    }
    for _, netStat := range netStats {
        data, err := readSysfsNetworkStats(interfaceName, netStat.File)
        if err != nil {
            return nil, err
        }
        *(netStat.Out) = data
    }
    return out, nil
}

// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
    data, err := ioutil.ReadFile(path.Join("/sys/class/net", ethInterface, "statistics", statsFile))
    if err != nil {
        return 0, err
    }
    return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
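Usage note (illustrative only; the veth name is hypothetical): because of the rx/tx swap in the table above, the `RxBytes` returned by the helper is what the container received, even though it is read from the host veth's `tx_bytes` file.

```go
package example

import (
    "fmt"

    containerLibcontainer "github.com/google/cadvisor/container/libcontainer"
)

// printVethStats reads /sys/class/net/<iface>/statistics via the helper above.
func printVethStats() error {
    iface, err := containerLibcontainer.GetNetworkInterfaceStats("veth1234ab") // hypothetical host veth
    if err != nil {
        return err
    }
    fmt.Printf("container rx=%d bytes, tx=%d bytes\n", iface.RxBytes, iface.TxBytes)
    return nil
}
```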
@@ -25,23 +25,20 @@ import (
    "time"

    "code.google.com/p/go.exp/inotify"
    dockerlibcontainer "github.com/docker/libcontainer"
    "github.com/docker/libcontainer/cgroups"
    cgroup_fs "github.com/docker/libcontainer/cgroups/fs"
    "github.com/docker/libcontainer/network"
    "github.com/docker/libcontainer/configs"
    "github.com/golang/glog"
    "github.com/google/cadvisor/container"
    "github.com/google/cadvisor/container/libcontainer"
    "github.com/google/cadvisor/fs"
    info "github.com/google/cadvisor/info/v1"
    "github.com/google/cadvisor/utils"
    "github.com/google/cadvisor/utils/sysinfo"
)

type rawContainerHandler struct {
    // Name of the container for this handler.
    name string
    cgroup *cgroups.Cgroup
    cgroupSubsystems *libcontainer.CgroupSubsystems
    machineInfoFactory info.MachineInfoFactory
@@ -54,15 +51,15 @@ type rawContainerHandler struct {
    // Containers being watched for new subcontainers.
    watches map[string]struct{}

    // Cgroup paths being watchd for new subcontainers
    // Cgroup paths being watched for new subcontainers
    cgroupWatches map[string]struct{}

    // Absolute path to the cgroup hierarchies of this container.
    // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
    cgroupPaths map[string]string

    // Equivalent libcontainer state for this container.
    libcontainerState dockerlibcontainer.State
    // Manager of this container's cgroups.
    cgroupManager cgroups.Manager

    // Whether this container has network isolation enabled.
    hasNetwork bool
@@ -83,38 +80,37 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
        return nil, err
    }

    // Generate the equivalent libcontainer state for this container.
    libcontainerState := dockerlibcontainer.State{
        CgroupPaths: cgroupPaths,
    // Generate the equivalent cgroup manager for this container.
    cgroupManager := &cgroup_fs.Manager{
        Cgroups: &configs.Cgroup{
            Name: name,
        },
        Paths: cgroupPaths,
    }

    hasNetwork := false
    var externalMounts []mount
    for _, container := range cHints.AllHosts {
        if name == container.FullName {
            libcontainerState.NetworkState = network.NetworkState{
            /*libcontainerState.NetworkState = network.NetworkState{
                VethHost: container.NetworkInterface.VethHost,
                VethChild: container.NetworkInterface.VethChild,
            }
            hasNetwork = true
            hasNetwork = true*/
            externalMounts = container.Mounts
            break
        }
    }

    return &rawContainerHandler{
        name: name,
        cgroup: &cgroups.Cgroup{
            Parent: "/",
            Name: name,
        },
        name: name,
        cgroupSubsystems: cgroupSubsystems,
        machineInfoFactory: machineInfoFactory,
        stopWatcher: make(chan error),
        watches: make(map[string]struct{}),
        cgroupWatches: make(map[string]struct{}),
        cgroupPaths: cgroupPaths,
        libcontainerState: libcontainerState,
        cgroupManager: cgroupManager,
        fsInfo: fsInfo,
        hasNetwork: hasNetwork,
        externalMounts: externalMounts,
@@ -311,29 +307,27 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
}

func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
    stats, err := libcontainer.GetStats(self.cgroupPaths, &self.libcontainerState)
    var networkInterfaces []string
    nd, err := self.GetRootNetworkDevices()
    if err != nil {
        return new(info.ContainerStats), err
    }
    if len(nd) != 0 {
        // ContainerStats only reports stat for one network device.
        // TODO(rjnagal): Handle multiple physical network devices.
        networkInterfaces = []string{nd[0].Name}
    }
    stats, err := libcontainer.GetStats(self.cgroupManager, networkInterfaces)
    if err != nil {
        return stats, err
    }

    // Get filesystem stats.
    err = self.getFsStats(stats)
    if err != nil {
        return stats, err
    }

    // Fill in network stats for root.
    nd, err := self.GetRootNetworkDevices()
    if err != nil {
        return stats, err
    }
    if len(nd) != 0 {
        // ContainerStats only reports stat for one network device.
        // TODO(rjnagal): Handle multiple physical network devices.
        stats.Network, err = sysinfo.GetNetworkStats(nd[0].Name)
        if err != nil {
            return stats, err
        }
    }
    return stats, nil
}
@@ -400,7 +394,8 @@ func (self *rawContainerHandler) ListThreads(listType container.ListType) ([]int
}

func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
    return cgroup_fs.GetPids(self.cgroup)
    // TODO(vmarmol): Implement
    return nil, nil
}

func (self *rawContainerHandler) watchDirectory(dir string, containerName string) error {