From 701cd36b5c2b75f462574e9f34b8f3065767fa60 Mon Sep 17 00:00:00 2001
From: "Tim St. Clair"
Date: Fri, 24 Feb 2017 10:15:11 -0800
Subject: [PATCH 01/10] Record sha256 for cAdvisor releases

---
 build/release.sh              | 3 +--
 docs/development/releasing.md | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/build/release.sh b/build/release.sh
index fba1527a..897476aa 100755
--- a/build/release.sh
+++ b/build/release.sh
@@ -48,8 +48,7 @@ docker build -t $docker_tag -t $gcr_tag -f deploy/Dockerfile .
 echo
 echo "Release info:"
 echo "VERSION=$VERSION"
-sha1sum --tag cadvisor
-md5sum --tag cadvisor
+sha256sum --tag cadvisor
 echo "docker image: $docker_tag"
 echo "gcr.io image: $gcr_tag"
 
diff --git a/docs/development/releasing.md b/docs/development/releasing.md
index 29d9e558..a9258dea 100644
--- a/docs/development/releasing.md
+++ b/docs/development/releasing.md
@@ -47,7 +47,7 @@ Command: `make release`
 - Try to build it from the release branch, since we include that in the binary version
 - Verify the ldflags output, in particular check the Version, BuildUser, and GoVersion are expected
 
-Once the build is complete, check the VERSION and note the sha1 and md5 hashes.
+Once the build is complete, check the VERSION and note the sha256 hash.
 
 ## 4. Push the Docker images
 

From a5cf6abf6de0925b7f01015bc446822efa2c3943 Mon Sep 17 00:00:00 2001
From: "Tim St. Clair"
Date: Mon, 27 Feb 2017 11:48:26 -0800
Subject: [PATCH 02/10] Test cAdvisor with the latest go patch release

---
 build/jenkins_e2e.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build/jenkins_e2e.sh b/build/jenkins_e2e.sh
index 1f5b25c0..b0ba8695 100755
--- a/build/jenkins_e2e.sh
+++ b/build/jenkins_e2e.sh
@@ -35,7 +35,7 @@ fi
 docker run --rm \
     -w "/go/src/github.com/google/cadvisor" \
     -v "${GOPATH}/src/github.com/google/cadvisor:/go/src/github.com/google/cadvisor" \
-    golang:1.7.1 make all test-runner
+    golang:1.7 make all test-runner
 
 # Nodes that are currently stable. When tests fail on a specific node, and the failure is not remedied within a week, that node will be removed from this list.
 golden_nodes=(

From 55172b9002aef1e7f91b56be3358b07b67927fbb Mon Sep 17 00:00:00 2001
From: Andy Xie
Date: Sat, 4 Mar 2017 12:06:00 +0800
Subject: [PATCH 03/10] fix markdown format

---
 docs/development/releasing.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/development/releasing.md b/docs/development/releasing.md
index a9258dea..13c8e0b4 100644
--- a/docs/development/releasing.md
+++ b/docs/development/releasing.md
@@ -93,9 +93,9 @@ Once you are satisfied with the release quality (consider waiting a week for bug
 
 1. Edit the github release a final time, and uncheck the "Pre-release" checkbox
 2. Tag the docker & gcr.io releases with the latest version
-   ```
-   $ docker pull google/cadvisor:$VERSION
-   $ docker tag -f google/cadvisor:$VERSION google/cadvisor:latest
-   $ docker tag -f google/cadvisor:$VERSION gcr.io/google_containers/cadvisor:latest
-   ```
+```
+$ docker pull google/cadvisor:$VERSION
+$ docker tag -f google/cadvisor:$VERSION google/cadvisor:latest
+$ docker tag -f google/cadvisor:$VERSION gcr.io/google_containers/cadvisor:latest
+```
 3. Repeat steps 4.a and 4.b to push the image tagged with latest

From dcf4be26136c9b0fb16c10ca8898b828a921da69 Mon Sep 17 00:00:00 2001
From: David Ashpole
Date: Thu, 9 Mar 2017 14:49:36 -0800
Subject: [PATCH 04/10] release version 0.25.0

---
 CHANGELOG.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bb1894eb..440a9af2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+### 0.25.0 (2017-03-09)
+- Disable thin_ls due to excessive iops
+- Ignore .mount cgroups, fixing disappearing stats
+- Fix wc goroutine leak
+- Update aws-sdk-go dependency to 1.6.10
+- Update to go 1.7 for releases
+
 ### 0.24.1 (2016-10-10)
 - Fix issue with running cAdvisor in a container on some distributions.
 

From 1e93f1a4129346b3ca1f98139c0d190aa72323c7 Mon Sep 17 00:00:00 2001
From: Andy Xie
Date: Sun, 12 Mar 2017 09:34:59 +0800
Subject: [PATCH 05/10] fix statsd error logging

---
 storage/statsd/client/client.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/storage/statsd/client/client.go b/storage/statsd/client/client.go
index 958468ad..99819fb3 100644
--- a/storage/statsd/client/client.go
+++ b/storage/statsd/client/client.go
@@ -49,8 +49,7 @@ func (self *Client) Send(namespace, containerName, key string, value uint64) err
     formatted := fmt.Sprintf("%s.%s.%s:%d|g", namespace, containerName, key, value)
     _, err := fmt.Fprintf(self.conn, formatted)
     if err != nil {
-        glog.V(3).Infof("failed to send data %q: %v", formatted, err)
-        return err
+        return fmt.Errorf("failed to send data %q: %v", formatted, err)
     }
     return nil
 }

From a248c345280c8db14767e282fb47b3649d80c16e Mon Sep 17 00:00:00 2001
From: NickrenREN
Date: Tue, 24 Jan 2017 17:58:39 +0800
Subject: [PATCH 06/10] optimize NewRealSysFs()

remove second return value since it will never return err
---
 cadvisor.go              | 5 +----
 utils/sysfs/sysfs.go     | 4 ++--
 utils/sysinfo/sysinfo.go | 5 +----
 3 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/cadvisor.go b/cadvisor.go
index 48ddc8ae..70faa2a2 100644
--- a/cadvisor.go
+++ b/cadvisor.go
@@ -117,10 +117,7 @@ func main() {
         glog.Fatalf("Failed to initialize storage driver: %s", err)
     }
 
-    sysFs, err := sysfs.NewRealSysFs()
-    if err != nil {
-        glog.Fatalf("Failed to create a system interface: %s", err)
-    }
+    sysFs := sysfs.NewRealSysFs()
 
     collectorHttpClient := createCollectorHttpClient(*collectorCert, *collectorKey)
 
diff --git a/utils/sysfs/sysfs.go b/utils/sysfs/sysfs.go
index 5667e016..1132e5b5 100644
--- a/utils/sysfs/sysfs.go
+++ b/utils/sysfs/sysfs.go
@@ -70,8 +70,8 @@ type SysFs interface {
 
 type realSysFs struct{}
 
-func NewRealSysFs() (SysFs, error) {
-    return &realSysFs{}, nil
+func NewRealSysFs() SysFs {
+    return &realSysFs{}
 }
 
 func (self *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
diff --git a/utils/sysinfo/sysinfo.go b/utils/sysinfo/sysinfo.go
index 8f7acc74..7dd14cf4 100644
--- a/utils/sysinfo/sysinfo.go
+++ b/utils/sysinfo/sysinfo.go
@@ -155,10 +155,7 @@ func GetCacheInfo(sysFs sysfs.SysFs, id int) ([]sysfs.CacheInfo, error) {
 
 func GetNetworkStats(name string) (info.InterfaceStats, error) {
     // TODO(rjnagal): Take syfs as an argument.
-    sysFs, err := sysfs.NewRealSysFs()
-    if err != nil {
-        return info.InterfaceStats{}, err
-    }
+    sysFs := sysfs.NewRealSysFs()
     return getNetworkStats(name, sysFs)
 }

From 3e43b4573d5d8ff638b36d5524eacfe6e65215b7 Mon Sep 17 00:00:00 2001
From: Dmitri Rubinstein
Date: Wed, 15 Mar 2017 16:06:54 +0100
Subject: [PATCH 07/10] Added fix for Kubernetes btrfs issue #38337

https://github.com/kubernetes/kubernetes/issues/38337
---
 fs/fs.go | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/fs/fs.go b/fs/fs.go
index b78a2e56..950a2eec 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -149,6 +149,31 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
             continue
         }
 
+        // btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
+        // instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
+        if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
+
+            buf := new(syscall.Stat_t)
+            err := syscall.Stat(mount.Source, buf)
+            if err != nil {
+                glog.Warningf("stat failed on %s with error: %s", mount.Source, err)
+            } else {
+                glog.Infof("btrfs mount %#v", mount)
+                if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
+                    err := syscall.Stat(mount.Mountpoint, buf)
+                    if err != nil {
+                        glog.Warningf("stat failed on %s with error: %s", mount.Mountpoint, err)
+                    } else {
+                        glog.Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
+                        glog.Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
+
+                        mount.Major = int(major(buf.Dev))
+                        mount.Minor = int(minor(buf.Dev))
+                    }
+                }
+            }
+        }
+
         partitions[mount.Source] = partition{
             fsType:     mount.Fstype,
             mountpoint: mount.Mountpoint,

From feecd47daacb9bedf2423b5ab05d56519c4f67cd Mon Sep 17 00:00:00 2001
From: Brian Akins
Date: Mon, 12 Dec 2016 14:56:15 -0500
Subject: [PATCH 08/10] Add watcher for zfs similar to devicemapper

minor cleanup

ensure we look at parent dataset for limit, etc
---
 container/docker/factory.go |  28 +++++++++
 container/docker/handler.go |  46 ++++++++++++++-
 utils/docker/docker.go      |  20 +++++--
 zfs/watcher.go              | 113 ++++++++++++++++++++++++++++++++++++
 4 files changed, 200 insertions(+), 7 deletions(-)
 create mode 100644 zfs/watcher.go

diff --git a/container/docker/factory.go b/container/docker/factory.go
index 5be3b596..08beeddf 100644
--- a/container/docker/factory.go
+++ b/container/docker/factory.go
@@ -33,6 +33,7 @@ import (
     "github.com/google/cadvisor/machine"
     "github.com/google/cadvisor/manager/watcher"
     dockerutil "github.com/google/cadvisor/utils/docker"
+    "github.com/google/cadvisor/zfs"
 
     docker "github.com/docker/engine-api/client"
     "github.com/golang/glog"
@@ -105,6 +106,8 @@ type dockerFactory struct {
     ignoreMetrics container.MetricSet
 
     thinPoolWatcher *devicemapper.ThinPoolWatcher
+
+    zfsWatcher *zfs.ZfsWatcher
 }
 
 func (self *dockerFactory) String() string {
@@ -132,6 +135,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
         self.dockerVersion,
         self.ignoreMetrics,
         self.thinPoolWatcher,
+        self.zfsWatcher,
     )
     return
 }
@@ -218,6 +222,21 @@ func startThinPoolWatcher(dockerInfo *dockertypes.Info) (*devicemapper.ThinPoolW
     return thinPoolWatcher, nil
 }
 
+func startZfsWatcher(dockerInfo *dockertypes.Info) (*zfs.ZfsWatcher, error) {
+    filesystem, err := dockerutil.DockerZfsFilesystem(*dockerInfo)
+    if err != nil {
+        return nil, err
+    }
+
+    zfsWatcher, err := zfs.NewZfsWatcher(filesystem)
+    if err != nil {
+        return nil, err
+    }
+
+    go zfsWatcher.Start()
+    return zfsWatcher, nil
+}
+
 func ensureThinLsKernelVersion(kernelVersion string) error {
     // kernel 4.4.0 has the proper bug fixes to allow thin_ls to work without corrupting the thin pool
     minKernelVersion := semver.MustParse("4.4.0")
@@ -306,6 +325,14 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
         }
     }
 
+    var zfsWatcher *zfs.ZfsWatcher
+    if storageDriver(dockerInfo.Driver) == zfsStorageDriver {
+        zfsWatcher, err = startZfsWatcher(dockerInfo)
+        if err != nil {
+            glog.Errorf("zfs filesystem stats will not be reported: %v", err)
+        }
+    }
+
     glog.Infof("Registering Docker factory")
     f := &dockerFactory{
         cgroupSubsystems: cgroupSubsystems,
@@ -317,6 +344,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
         storageDir:       RootDir(),
         ignoreMetrics:    ignoreMetrics,
         thinPoolWatcher:  thinPoolWatcher,
+        zfsWatcher:       zfsWatcher,
     }
 
     container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
diff --git a/container/docker/handler.go b/container/docker/handler.go
index dd0a2cdd..e1409326 100644
--- a/container/docker/handler.go
+++ b/container/docker/handler.go
@@ -29,6 +29,7 @@ import (
     "github.com/google/cadvisor/fs"
     info "github.com/google/cadvisor/info/v1"
     dockerutil "github.com/google/cadvisor/utils/docker"
+    "github.com/google/cadvisor/zfs"
 
     docker "github.com/docker/engine-api/client"
     dockercontainer "github.com/docker/engine-api/types/container"
@@ -42,6 +43,7 @@ import (
 const (
     // The read write layers exist here.
     aufsRWLayer = "diff"
+
     // Path to the directory where docker stores log files if the json logging driver is enabled.
     pathToContainersDir = "containers"
 )
@@ -72,6 +74,12 @@ type dockerContainerHandler struct {
     // the devicemapper device id for the container
     deviceID string
 
+    // zfs Filesystem
+    zfsFilesystem string
+
+    // zfsParent is the parent for docker zfs
+    zfsParent string
+
     // Time at which this container was created.
     creationTime time.Time
 
@@ -101,6 +109,9 @@ type dockerContainerHandler struct {
 
     // thin pool watcher
     thinPoolWatcher *devicemapper.ThinPoolWatcher
+
+    // zfs watcher
+    zfsWatcher *zfs.ZfsWatcher
 }
 
 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -136,6 +147,7 @@ func newDockerContainerHandler(
     dockerVersion []int,
     ignoreMetrics container.MetricSet,
     thinPoolWatcher *devicemapper.ThinPoolWatcher,
+    zfsWatcher *zfs.ZfsWatcher,
 ) (container.ContainerHandler, error) {
     // Create the cgroup paths.
     cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -172,12 +184,21 @@ func newDockerContainerHandler(
     var (
         rootfsStorageDir string
         poolName         string
+        zfsFilesystem    string
+        zfsParent        string
     )
     switch storageDriver {
     case aufsStorageDriver:
         rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
     case overlayStorageDriver:
         rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
+    case zfsStorageDriver:
+        status, err := Status()
+        if err != nil {
+            return nil, fmt.Errorf("unable to determine docker status: %v", err)
+        }
+        zfsParent = status.DriverStatus[dockerutil.DriverStatusParentDataset]
+        zfsFilesystem = path.Join(zfsParent, rwLayerID)
     case devicemapperStorageDriver:
         status, err := Status()
         if err != nil {
@@ -199,10 +220,13 @@ func newDockerContainerHandler(
         fsInfo:             fsInfo,
         rootFs:             rootFs,
         poolName:           poolName,
+        zfsFilesystem:      zfsFilesystem,
         rootfsStorageDir:   rootfsStorageDir,
         envs:               make(map[string]string),
         ignoreMetrics:      ignoreMetrics,
         thinPoolWatcher:    thinPoolWatcher,
+        zfsWatcher:         zfsWatcher,
+        zfsParent:          zfsParent,
     }
 
     // We assume that if Inspect fails then the container is not known to docker.
@@ -245,7 +269,9 @@ func newDockerContainerHandler(
         handler.fsHandler = &dockerFsHandler{
             fsHandler:       common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo),
             thinPoolWatcher: thinPoolWatcher,
+            zfsWatcher:      zfsWatcher,
             deviceID:        handler.deviceID,
+            zfsFilesystem:   zfsFilesystem,
         }
     }
 
@@ -265,7 +291,7 @@ func newDockerContainerHandler(
 }
 
 // dockerFsHandler is a composite FsHandler implementation the incorporates
-// the common fs handler and a devicemapper ThinPoolWatcher.
+// the common fs handler, a devicemapper ThinPoolWatcher, and a zfsWatcher
 type dockerFsHandler struct {
     fsHandler common.FsHandler
 
@@ -273,6 +299,11 @@ type dockerFsHandler struct {
     thinPoolWatcher *devicemapper.ThinPoolWatcher
     // deviceID is the id of the container's fs device
     deviceID string
+
+    // zfsWatcher is the zfs filesystem watcher
+    zfsWatcher *zfs.ZfsWatcher
+    // zfsFilesystem is the docker zfs filesystem
+    zfsFilesystem string
 }
 
 var _ common.FsHandler = &dockerFsHandler{}
@@ -306,6 +337,15 @@ func (h *dockerFsHandler) Usage() common.FsUsage {
         }
     }
 
+    if h.zfsWatcher != nil {
+        zfsUsage, err := h.zfsWatcher.GetUsage(h.zfsFilesystem)
+        if err != nil {
+            glog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err)
+        } else {
+            usage.BaseUsageBytes = zfsUsage
+            usage.TotalUsageBytes += zfsUsage
+        }
+    }
     return usage
 }
 
@@ -359,12 +399,14 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
         // Device has to be the pool name to correlate with the device name as
         // set in the machine info filesystems.
         device = self.poolName
-    case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
+    case aufsStorageDriver, overlayStorageDriver:
         deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
         if err != nil {
             return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
         }
         device = deviceInfo.Device
+    case zfsStorageDriver:
+        device = self.zfsParent
     default:
         return nil
     }
diff --git a/utils/docker/docker.go b/utils/docker/docker.go
index 3ae62797..d59f6f1f 100644
--- a/utils/docker/docker.go
+++ b/utils/docker/docker.go
@@ -23,11 +23,12 @@ import (
 )
 
 const (
-    DockerInfoDriver         = "Driver"
-    DockerInfoDriverStatus   = "DriverStatus"
-    DriverStatusPoolName     = "Pool Name"
-    DriverStatusDataLoopFile = "Data loop file"
-    DriverStatusMetadataFile = "Metadata file"
+    DockerInfoDriver          = "Driver"
+    DockerInfoDriverStatus    = "DriverStatus"
+    DriverStatusPoolName      = "Pool Name"
+    DriverStatusDataLoopFile  = "Data loop file"
+    DriverStatusMetadataFile  = "Metadata file"
+    DriverStatusParentDataset = "Parent Dataset"
 )
 
 func DriverStatusValue(status [][2]string, target string) string {
@@ -68,3 +69,12 @@ func DockerMetadataDevice(info dockertypes.Info) (string, error) {
 
     return metadataDevice, nil
 }
+
+func DockerZfsFilesystem(info dockertypes.Info) (string, error) {
+    filesystem := DriverStatusValue(info.DriverStatus, DriverStatusParentDataset)
+    if len(filesystem) == 0 {
+        return "", fmt.Errorf("Could not get zfs filesystem")
+    }
+
+    return filesystem, nil
+}
diff --git a/zfs/watcher.go b/zfs/watcher.go
new file mode 100644
index 00000000..1bc3fb74
--- /dev/null
+++ b/zfs/watcher.go
@@ -0,0 +1,113 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package zfs
+
+import (
+    "fmt"
+    "sync"
+    "time"
+
+    "github.com/golang/glog"
+    zfs "github.com/mistifyio/go-zfs"
+)
+
+// zfsWatcher maintains a cache of filesystem -> usage stats for a
+// zfs filesystem
+type ZfsWatcher struct {
+    filesystem string
+    lock       *sync.RWMutex
+    cache      map[string]uint64
+    period     time.Duration
+    stopChan   chan struct{}
+}
+
+// NewZfsWatcher returns a new ZfsWatcher for the given zfs
+// filesystem, or an error.
+func NewZfsWatcher(filesystem string) (*ZfsWatcher, error) {
+
+    return &ZfsWatcher{
+        filesystem: filesystem,
+        lock:       &sync.RWMutex{},
+        cache:      make(map[string]uint64),
+        period:     15 * time.Second,
+        stopChan:   make(chan struct{}),
+    }, nil
+}
+
+// Start starts the ZfsWatcher.
+func (w *ZfsWatcher) Start() {
+    err := w.Refresh()
+    if err != nil {
+        glog.Errorf("encountered error refreshing zfs watcher: %v", err)
+    }
+
+    for {
+        select {
+        case <-w.stopChan:
+            return
+        case <-time.After(w.period):
+            start := time.Now()
+            err = w.Refresh()
+            if err != nil {
+                glog.Errorf("encountered error refreshing zfs watcher: %v", err)
+            }
+
+            // print latency for refresh
+            duration := time.Since(start)
+            glog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration)
+        }
+    }
+}
+
+// Stop stops the ZfsWatcher.
+func (w *ZfsWatcher) Stop() {
+    close(w.stopChan)
+}
+
+// GetUsage gets the cached usage value of the given filesystem.
+func (w *ZfsWatcher) GetUsage(filesystem string) (uint64, error) {
+    w.lock.RLock()
+    defer w.lock.RUnlock()
+
+    v, ok := w.cache[filesystem]
+    if !ok {
+        return 0, fmt.Errorf("no cached value for usage of filesystem %v", filesystem)
+    }
+
+    return v, nil
+}
+
+// Refresh performs a zfs get
+func (w *ZfsWatcher) Refresh() error {
+    w.lock.Lock()
+    defer w.lock.Unlock()
+    newCache := make(map[string]uint64)
+    parent, err := zfs.GetDataset(w.filesystem)
+    if err != nil {
+        glog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err)
+        return err
+    }
+    children, err := parent.Children(0)
+    if err != nil {
+        glog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err)
+        return err
+    }
+
+    for _, ds := range children {
+        newCache[ds.Name] = ds.Used
+    }
+
+    w.cache = newCache
+    return nil
+}

From 594dfd4ea661fd8966d3a7a5e75bbcbe7fe3153a Mon Sep 17 00:00:00 2001
From: "Tim St. Clair"
Date: Fri, 17 Mar 2017 10:50:07 -0700
Subject: [PATCH 09/10] Remove out of date support section of running doc

---
 docs/running.md | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/docs/running.md b/docs/running.md
index 5f369645..6e980478 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -78,7 +78,3 @@ cAdvisor is now running (in the foreground) on `http://localhost:8080/`.
 ## Runtime Options
 
 cAdvisor has a series of flags that can be used to configure its runtime behavior. More details can be found in runtime [options](runtime_options.md).
-
-## I need help!
-
-We aim to have cAdvisor run everywhere! If you run into issues getting it running, feel free to file an issue. We are very responsive in supporting our users and update our documentation with new setups.

From 4a4471684f103a4f482e5da78026862210097457 Mon Sep 17 00:00:00 2001
From: jjqq
Date: Fri, 17 Mar 2017 14:01:44 +0900
Subject: [PATCH 10/10] doc about error `Privileged mode is incompatible with
 user namespaces` and solution `--userns=host`

---
 docs/running.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/running.md b/docs/running.md
index 6e980478..b798c1c9 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -18,6 +18,10 @@ sudo docker run \
 
 cAdvisor is now running (in the background) on `http://localhost:8080/`. The setup includes directories with Docker state cAdvisor needs to observe.
 
+**Note**: If the docker daemon is running with [user namespaces enabled](https://docs.docker.com/engine/reference/commandline/dockerd/#starting-the-daemon-with-user-namespaces-enabled),
+you need to add the `--userns=host` option in order for cAdvisor to monitor Docker containers;
+otherwise cAdvisor cannot connect to the docker daemon.
+
 ## Latest Canary
 
 The latest cAdvisor canary release is continuously built from HEAD and available