Merge pull request #926 from jimmidyson/docker-fs-size
Support devicemapper storage for docker images dir
Commit: 75401d790c
@@ -81,11 +81,17 @@ func RootDir() string {
     return *dockerRootDir
 }
 
+type storageDriver string
+
+const (
+    devicemapperStorageDriver storageDriver = "devicemapper"
+    aufsStorageDriver storageDriver = "aufs"
+)
+
 type dockerFactory struct {
     machineInfoFactory info.MachineInfoFactory
 
-    // Whether docker is running with AUFS storage driver.
-    usesAufsDriver bool
+    storageDriver storageDriver
 
     client *docker.Client
 
@@ -110,7 +116,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
         name,
         self.machineInfoFactory,
         self.fsInfo,
-        self.usesAufsDriver,
+        self.storageDriver,
         &self.cgroupSubsystems,
         inHostNamespace,
     )
@@ -184,6 +190,10 @@ func parseDockerVersion(full_version_string string) ([]int, error) {
 
 // Register root container before running this function!
 func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
+    if UseSystemd() {
+        glog.Infof("System is using systemd")
+    }
+
     client, err := Client()
     if err != nil {
         return fmt.Errorf("unable to communicate with docker daemon: %v", err)
@@ -207,32 +217,16 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
     }
 
     // Check that the libcontainer execdriver is used.
-    information, err := client.Info()
+    information, err := DockerInfo()
     if err != nil {
         return fmt.Errorf("failed to detect Docker info: %v", err)
     }
-    usesNativeDriver := false
-    for _, val := range *information {
-        if strings.Contains(val, "ExecutionDriver=") && strings.Contains(val, "native") {
-            usesNativeDriver = true
-            break
-        }
-    }
-    if !usesNativeDriver {
+    execDriver, ok := information["ExecutionDriver"]
+    if !ok || !strings.HasPrefix(execDriver, "native") {
         return fmt.Errorf("docker found, but not using native exec driver")
     }
 
-    usesAufsDriver := false
-    for _, val := range *information {
-        if strings.Contains(val, "Driver=") && strings.Contains(val, "aufs") {
-            usesAufsDriver = true
-            break
-        }
-    }
-
-    if UseSystemd() {
-        glog.Infof("System is using systemd")
-    }
+    sd, _ := information["Driver"]
 
     cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
     if err != nil {
@@ -243,7 +237,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
     f := &dockerFactory{
         machineInfoFactory: factory,
         client: client,
-        usesAufsDriver: usesAufsDriver,
+        storageDriver: storageDriver(sd),
         cgroupSubsystems: cgroupSubsystems,
         fsInfo: fsInfo,
     }
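Note on the factory change: Register no longer reduces the Docker info to a usesAufsDriver boolean; it keeps the raw "Driver" string and stores it as the typed storageDriver on dockerFactory, so other drivers (devicemapper here) can be recognized downstream. A minimal, self-contained sketch of that detection path, using a made-up info map in place of the real DockerInfo() result:

```go
// Illustrative sketch, not part of the diff: how a Docker info map of the form
// used above ("ExecutionDriver", "Driver") becomes the typed storageDriver
// value that Register stores on dockerFactory. The map literal is fabricated.
package main

import (
	"fmt"
	"strings"
)

type storageDriver string

const (
	devicemapperStorageDriver storageDriver = "devicemapper"
	aufsStorageDriver         storageDriver = "aufs"
)

func main() {
	information := map[string]string{
		"ExecutionDriver": "native-0.2",
		"Driver":          "devicemapper",
	}

	execDriver, ok := information["ExecutionDriver"]
	if !ok || !strings.HasPrefix(execDriver, "native") {
		fmt.Println("docker found, but not using native exec driver")
		return
	}

	// Any driver string is accepted; the two constants above are just the
	// values the rest of the patch switches on.
	sd := storageDriver(information["Driver"])
	fmt.Println("detected storage driver:", sd)
}
```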
@@ -62,9 +62,9 @@ type dockerContainerHandler struct {
     // Manager of this container's cgroups.
     cgroupManager cgroups.Manager
 
-    usesAufsDriver bool
-    fsInfo fs.FsInfo
-    storageDirs []string
+    storageDriver storageDriver
+    fsInfo fs.FsInfo
+    storageDirs []string
 
     // Time at which this container was created.
     creationTime time.Time
@@ -93,7 +93,7 @@ func newDockerContainerHandler(
     name string,
     machineInfoFactory info.MachineInfoFactory,
     fsInfo fs.FsInfo,
-    usesAufsDriver bool,
+    storageDriver storageDriver,
     cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
     inHostNamespace bool,
 ) (container.ContainerHandler, error) {
@@ -127,14 +127,15 @@ func newDockerContainerHandler(
         machineInfoFactory: machineInfoFactory,
         cgroupPaths: cgroupPaths,
         cgroupManager: cgroupManager,
-        usesAufsDriver: usesAufsDriver,
+        storageDriver: storageDriver,
         fsInfo: fsInfo,
         rootFs: rootFs,
         storageDirs: storageDirs,
         fsHandler: newFsHandler(time.Minute, storageDirs, fsInfo),
     }
 
-    if usesAufsDriver {
+    switch storageDriver {
+    case aufsStorageDriver:
         handler.fsHandler.start()
     }
 
@@ -228,9 +229,8 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 
     spec := libcontainerConfigToContainerSpec(libcontainerConfig, mi)
     spec.CreationTime = self.creationTime
-    if self.usesAufsDriver {
-        spec.HasFilesystem = true
-    }
+    // For now only enable for aufs filesystems
+    spec.HasFilesystem = self.storageDriver == aufsStorageDriver
     spec.Labels = self.labels
     spec.Image = self.image
     spec.HasNetwork = hasNet(self.networkMode)
@@ -240,7 +240,7 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 
 func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
     // No support for non-aufs storage drivers.
-    if !self.usesAufsDriver {
+    if self.storageDriver != aufsStorageDriver {
         return nil
     }
 
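The handler changes are mostly the mechanical rename from usesAufsDriver to storageDriver, but the effect is that per-container filesystem stats stay aufs-only for now (GetSpec and getFsStats both compare against aufsStorageDriver), while devicemapper is only reported for the global docker images dir. A small sketch of that gating; hasPerContainerFs is a hypothetical helper, not a function in this patch:

```go
// Sketch only: the aufs-only gating used in GetSpec and getFsStats above,
// pulled out into a hypothetical helper to make the rule explicit. cAdvisor
// itself does the comparison inline.
package main

import "fmt"

type storageDriver string

const (
	devicemapperStorageDriver storageDriver = "devicemapper"
	aufsStorageDriver         storageDriver = "aufs"
)

// hasPerContainerFs reports whether per-container filesystem usage is
// collected for the given driver; only aufs qualifies in this patch.
func hasPerContainerFs(driver storageDriver) bool {
	return driver == aufsStorageDriver
}

func main() {
	for _, d := range []storageDriver{aufsStorageDriver, devicemapperStorageDriver} {
		fmt.Printf("%s: per-container fs stats = %v\n", d, hasPerContainerFs(d))
	}
}
```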
@@ -1,8 +1,16 @@
-FROM progrium/busybox
-MAINTAINER dengnan@google.com vmarmol@google.com vishnuk@google.com
+FROM alpine:3.2
+MAINTAINER dengnan@google.com vmarmol@google.com vishnuk@google.com jimmidyson@gmail.com
 
+RUN apk add --update ca-certificates device-mapper && \
+    wget https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/8/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk && \
+    wget https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/8/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-bin-2.21-r2.apk && \
+    apk add --allow-untrusted glibc-2.21-r2.apk glibc-bin-2.21-r2.apk && \
+    /usr/glibc/usr/bin/ldconfig /lib /usr/glibc/usr/lib && \
+    echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
+    rm -rf /var/cache/apk/*
+
 # Grab cadvisor from the staging directory.
 ADD cadvisor /usr/bin/cadvisor
 
 EXPOSE 8080
-ENTRYPOINT ["/usr/bin/cadvisor"]
+ENTRYPOINT ["/usr/bin/cadvisor", "-logtostderr"]
@@ -1,8 +1,7 @@
 FROM golang:latest
 MAINTAINER vmarmol@google.com
 
-
-RUN apt-get install -y git
+RUN apt-get install -y git thin-provisioning-tools
 RUN git clone https://github.com/google/cadvisor.git /go/src/github.com/google/cadvisor
 RUN go get github.com/tools/godep
 RUN cd /go/src/github.com/google/cadvisor && godep go build .
fs/fs.go
@@ -19,6 +19,8 @@ package fs
 
 import (
     "bufio"
+    "bytes"
+    "encoding/json"
     "fmt"
     "os"
     "os/exec"
@@ -44,6 +46,8 @@ type partition struct {
     mountpoint string
     major uint
     minor uint
+    fsType string
+    blockSize uint
 }
 
 type RealFsInfo struct {
@@ -57,6 +61,7 @@ type RealFsInfo struct {
 type Context struct {
     // docker root directory.
     DockerRoot string
+    DockerInfo map[string]string
 }
 
 func NewFsInfo(context Context) (FsInfo, error) {
@@ -80,7 +85,25 @@ func NewFsInfo(context Context) (FsInfo, error) {
         if _, ok := partitions[mount.Source]; ok {
             continue
         }
-        partitions[mount.Source] = partition{mount.Mountpoint, uint(mount.Major), uint(mount.Minor)}
+        partitions[mount.Source] = partition{
+            mountpoint: mount.Mountpoint,
+            major: uint(mount.Major),
+            minor: uint(mount.Minor),
+        }
     }
+    if storageDriver, ok := context.DockerInfo["Driver"]; ok && storageDriver == "devicemapper" {
+        dev, major, minor, blockSize, err := dockerDMDevice(context.DockerInfo["DriverStatus"])
+        if err != nil {
+            glog.Warningf("Could not get Docker devicemapper device: %v", err)
+        } else {
+            partitions[dev] = partition{
+                fsType: "devicemapper",
+                major: major,
+                minor: minor,
+                blockSize: blockSize,
+            }
+            fsInfo.labels[LabelDockerImages] = dev
+        }
+    }
     glog.Infof("Filesystem partitions: %+v", partitions)
     fsInfo.partitions = partitions
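With the NewFsInfo change above, the partitions map holds two kinds of entries: mounts discovered from the mount table (no fsType, stats via statvfs) and, when Docker reports the devicemapper driver, one pseudo-partition keyed by the thin-pool name that carries fsType "devicemapper" and the pool's block size. The GetFsInfoForPath hunk that follows switches on that fsType. A sketch of the routing, with made-up device names and sizes:

```go
// Sketch (not from the patch): how the two kinds of partition entries built in
// NewFsInfo are told apart when stats are collected. The device names, numbers
// and pool name below are fabricated examples.
package main

import "fmt"

type partition struct {
	mountpoint string
	major      uint
	minor      uint
	fsType     string
	blockSize  uint
}

func main() {
	partitions := map[string]partition{
		// Entry from the mount table walk.
		"/dev/sda1": {mountpoint: "/", major: 8, minor: 1},
		// Entry added when Docker reports the devicemapper driver.
		"docker-8:1-1234567-pool": {fsType: "devicemapper", major: 252, minor: 1, blockSize: 128},
	}

	for device, p := range partitions {
		switch p.fsType {
		case "devicemapper":
			fmt.Printf("%s: use dmsetup-based stats (block size %d sectors)\n", device, p.blockSize)
		default:
			fmt.Printf("%s: use statvfs on %s\n", device, p.mountpoint)
		}
	}
}
```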
@@ -174,9 +197,18 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
         _, hasMount := mountSet[partition.mountpoint]
         _, hasDevice := deviceSet[device]
         if mountSet == nil || (hasMount && !hasDevice) {
-            total, free, avail, err := getVfsStats(partition.mountpoint)
+            var (
+                total, free, avail uint64
+                err error
+            )
+            switch partition.fsType {
+            case "devicemapper":
+                total, free, avail, err = getDMStats(device, partition.blockSize)
+            default:
+                total, free, avail, err = getVfsStats(partition.mountpoint)
+            }
             if err != nil {
-                glog.Errorf("Statvfs failed. Error: %v", err)
+                glog.Errorf("Stat fs failed. Error: %v", err)
             } else {
                 deviceSet[device] = struct{}{}
                 deviceInfo := DeviceInfo{
@@ -295,3 +327,67 @@ func getVfsStats(path string) (uint64, uint64, uint64, error) {
     avail := uint64(s.Frsize) * s.Bavail
     return total, free, avail, nil
 }
+
+func dockerStatusValue(status [][]string, target string) string {
+    for _, v := range status {
+        if len(v) == 2 && strings.ToLower(v[0]) == strings.ToLower(target) {
+            return v[1]
+        }
+    }
+    return ""
+}
+
+func dockerDMDevice(driverStatus string) (string, uint, uint, uint, error) {
+    var config [][]string
+    err := json.Unmarshal([]byte(driverStatus), &config)
+    if err != nil {
+        return "", 0, 0, 0, err
+    }
+    poolName := dockerStatusValue(config, "Pool Name")
+    if len(poolName) == 0 {
+        return "", 0, 0, 0, fmt.Errorf("Could not get dm pool name")
+    }
+
+    dmTable, err := exec.Command("dmsetup", "table", poolName).Output()
+    if err != nil {
+        return "", 0, 0, 0, err
+    }
+
+    var (
+        major, minor, dataBlkSize, bkt uint
+        bkts string
+    )
+
+    _, err = fmt.Fscanf(bytes.NewReader(dmTable),
+        "%d %d %s %d:%d %d:%d %d %d %d %s",
+        &bkt, &bkt, &bkts, &bkt, &bkt, &major, &minor, &dataBlkSize, &bkt, &bkt, &bkts)
+    if err != nil {
+        return "", 0, 0, 0, err
+    }
+    return poolName, major, minor, dataBlkSize, nil
+}
+
+func getDMStats(poolName string, dataBlkSize uint) (uint64, uint64, uint64, error) {
+    dmStatus, err := exec.Command("dmsetup", "status", poolName).Output()
+    if err != nil {
+        return 0, 0, 0, err
+    }
+
+    var (
+        total, used, bkt uint64
+        bkts string
+    )
+
+    _, err = fmt.Fscanf(bytes.NewReader(dmStatus),
+        "%d %d %s %d %d/%d %d/%d %s %s %s %s",
+        &bkt, &bkt, &bkts, &bkt, &bkt, &bkt, &used, &total, &bkts, &bkts, &bkts, &bkts)
+    if err != nil {
+        return 0, 0, 0, err
+    }
+
+    total *= 512 * uint64(dataBlkSize)
+    used *= 512 * uint64(dataBlkSize)
+    free := total - used
+
+    return total, free, free, nil
+}
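The two helpers above shell out to dmsetup: dockerDMDevice scans `dmsetup table <pool>` for the data device's major:minor pair and the pool block size, and getDMStats scans `dmsetup status <pool>` for the used/total data block counts, converting blocks to bytes as blocks * blockSize * 512 (the block size is in 512-byte sectors). Below is a sketch that runs the same Fscanf formats against hard-coded sample lines, so it needs no devicemapper setup; the sample table and status lines are illustrative thin-pool output, not captured from a real pool:

```go
// Sketch of the two dmsetup parses added above, applied to fabricated sample
// lines instead of real `dmsetup table <pool>` / `dmsetup status <pool>` output.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Per the Fscanf format in dockerDMDevice: the second major:minor pair is
	// the data device, followed by the data block size in 512-byte sectors.
	dmTable := []byte("0 209715200 thin-pool 252:0 252:1 128 32768 1 skip_block_zeroing")

	var (
		major, minor, dataBlkSize, bkt uint
		bkts                           string
	)
	if _, err := fmt.Fscanf(bytes.NewReader(dmTable),
		"%d %d %s %d:%d %d:%d %d %d %d %s",
		&bkt, &bkt, &bkts, &bkt, &bkt, &major, &minor, &dataBlkSize, &bkt, &bkt, &bkts); err != nil {
		fmt.Println("table parse failed:", err)
		return
	}
	fmt.Printf("data device %d:%d, block size %d sectors\n", major, minor, dataBlkSize)

	// Per the Fscanf format in getDMStats: the second used/total pair is the
	// data block usage (the first pair is metadata blocks).
	dmStatus := []byte("0 209715200 thin-pool 91 178/524288 71359/1638400 - rw discard_passdown queue_if_no_space")

	var (
		total, used, ubkt uint64
		sbkt              string
	)
	if _, err := fmt.Fscanf(bytes.NewReader(dmStatus),
		"%d %d %s %d %d/%d %d/%d %s %s %s %s",
		&ubkt, &ubkt, &sbkt, &ubkt, &ubkt, &ubkt, &used, &total, &sbkt, &sbkt, &sbkt, &sbkt); err != nil {
		fmt.Println("status parse failed:", err)
		return
	}

	// Block counts become bytes: blocks * blockSize (512-byte sectors) * 512.
	totalBytes := total * 512 * uint64(dataBlkSize)
	usedBytes := used * 512 * uint64(dataBlkSize)
	fmt.Printf("total %d bytes, used %d bytes, free %d bytes\n",
		totalBytes, usedBytes, totalBytes-usedBytes)
}
```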
@@ -16,6 +16,7 @@
 package manager
 
 import (
+    "encoding/json"
     "flag"
     "fmt"
     "os"
@@ -126,7 +127,11 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
     }
     glog.Infof("cAdvisor running in container: %q", selfContainer)
 
-    context := fs.Context{DockerRoot: docker.RootDir()}
+    dockerInfo, err := docker.DockerInfo()
+    if err != nil {
+        return nil, err
+    }
+    context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
     fsInfo, err := fs.NewFsInfo(context)
     if err != nil {
         return nil, err
@@ -1188,19 +1193,13 @@ func (m *manager) DockerInfo() (DockerStatus, error) {
             out.NumContainers = n
         }
     }
-    // cut, trim, cut - Example format:
-    // DriverStatus=[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]
     if val, ok := info["DriverStatus"]; ok {
+        var driverStatus [][]string
+        err = json.Unmarshal([]byte(val), &driverStatus)
         out.DriverStatus = make(map[string]string)
-        val = strings.TrimPrefix(val, "[[")
-        val = strings.TrimSuffix(val, "]]")
-        vals := strings.Split(val, "],[")
-        for _, v := range vals {
-            kv := strings.Split(v, "\",\"")
-            if len(kv) != 2 {
-                continue
-            } else {
-                out.DriverStatus[strings.Trim(kv[0], "\"")] = strings.Trim(kv[1], "\"")
-            }
+        for _, v := range driverStatus {
+            if len(v) == 2 {
+                out.DriverStatus[v[0]] = v[1]
             }
         }
     }
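DockerInfo now decodes DriverStatus with encoding/json instead of the old prefix/suffix trimming and splitting, since `docker info` reports it as a JSON array of key/value pairs. A sketch using the example string quoted in the hunk above:

```go
// Sketch mirroring the new DriverStatus handling: decode the JSON pairs and
// build the same map[string]string that DockerInfo returns. The input string
// is the documented example; on a devicemapper host the same pairs carry
// "Pool Name", which dockerDMDevice looks up via dockerStatusValue in fs/fs.go.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	val := `[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirperm1 Supported","false"]]`

	var driverStatus [][]string
	if err := json.Unmarshal([]byte(val), &driverStatus); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}

	status := make(map[string]string)
	for _, v := range driverStatus {
		if len(v) == 2 {
			status[v[0]] = v[1]
		}
	}
	fmt.Printf("%+v\n", status)
}
```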