Merge pull request #135 from kelseyhightower/godep
manage dependencies with godep
commit 7d1987f8b2
@@ -5,10 +5,12 @@ before_script:
 - go get github.com/stretchr/testify/mock
 - go get github.com/kr/pretty
 - go get code.google.com/p/go.tools/cmd/vet
+- go get github.com/kr/godep
 - wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
 - sudo dpkg -i influxdb_latest_amd64.deb
 - sudo service influxdb start
 script:
-- go test -v -race github.com/google/cadvisor/...
-- go build github.com/google/cadvisor
+- export PATH=$PATH:$HOME/gopath/bin
+- godep go test -v -race github.com/google/cadvisor/...
+- godep go build github.com/google/cadvisor
 - go vet github.com/google/cadvisor
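The script now calls godep go test and godep go build instead of invoking the go tool directly. Assuming godep's documented behavior of running the go tool against the vendored Godeps/_workspace tree, the wrapper is roughly equivalent to the following sketch (illustration only, not part of this commit):

package main

import (
    "os"
    "os/exec"
    "path/filepath"
)

// Rough illustration only: "godep go test" behaves roughly like running the
// go tool with the repository's Godeps/_workspace prepended to GOPATH.
func main() {
    repo, err := os.Getwd()
    if err != nil {
        panic(err)
    }
    workspace := filepath.Join(repo, "Godeps", "_workspace")

    cmd := exec.Command("go", "test", "-v", "-race", "github.com/google/cadvisor/...")
    cmd.Env = append(os.Environ(),
        "GOPATH="+workspace+string(os.PathListSeparator)+os.Getenv("GOPATH"))
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        os.Exit(1)
    }
}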
Godeps/Godeps.json  (generated, new file, 47 lines)
@@ -0,0 +1,47 @@

{
    "ImportPath": "github.com/google/cadvisor",
    "GoVersion": "go1.3",
    "Deps": [
        {
            "ImportPath": "github.com/docker/docker/pkg/mount",
            "Comment": "v1.1.1-402-g487a417",
            "Rev": "487a417d9fd074d0e78876072c7d1ebfd398ea7a"
        },
        {
            "ImportPath": "github.com/docker/libcontainer",
            "Comment": "v1.1.0-115-ge6a43c1",
            "Rev": "e6a43c1c2b9f769deb96348a0a93417cd48a36d8"
        },
        {
            "ImportPath": "github.com/fsouza/go-dockerclient",
            "Comment": "0.2.1-185-g4fc0e0d",
            "Rev": "4fc0e0dbba85f2f799aa77cea594d40267f5bd5a"
        },
        {
            "ImportPath": "github.com/influxdb/influxdb/client",
            "Comment": "v0.8.0-rc.3-9-g3284662",
            "Rev": "3284662b350688b651359f9124928856071bd3f5"
        },
        {
            "ImportPath": "github.com/kr/pretty",
            "Comment": "go.weekly.2011-12-22-20-g088c856",
            "Rev": "088c856450c08c03eb32f7a6c221e6eefaa10e6f"
        },
        {
            "ImportPath": "github.com/kr/text",
            "Rev": "6807e777504f54ad073ecef66747de158294b639"
        },
        {
            "ImportPath": "github.com/stretchr/objx",
            "Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
        },
        {
            "ImportPath": "github.com/stretchr/testify/assert",
            "Rev": "ed6ea184117f7ae3168d1c05dbdbde1a86e2be68"
        },
        {
            "ImportPath": "github.com/stretchr/testify/mock",
            "Rev": "ed6ea184117f7ae3168d1c05dbdbde1a86e2be68"
        }
    ]
}
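Godeps/Godeps.json pins every dependency to an exact revision. As a minimal sketch (not part of this commit), the manifest maps directly onto a small struct, so a tool could list the pinned revisions like this:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// dependency and godeps mirror the manifest fields shown above.
type dependency struct {
    ImportPath string
    Comment    string `json:",omitempty"`
    Rev        string
}

type godeps struct {
    ImportPath string
    GoVersion  string
    Deps       []dependency
}

func main() {
    f, err := os.Open("Godeps/Godeps.json")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    var g godeps
    if err := json.NewDecoder(f).Decode(&g); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    for _, d := range g.Deps {
        fmt.Printf("%s pinned at %s\n", d.ImportPath, d.Rev)
    }
}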
Godeps/Readme  (generated, new file, 5 lines)
@@ -0,0 +1,5 @@

This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
Godeps/_workspace/.gitignore  (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@

/pkg
/bin
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS  (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@

Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go  (generated, vendored, new file, 62 lines)
@@ -0,0 +1,62 @@

package mount

import (
    "strings"
)

// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
    var (
        flag int
        data []string
    )

    flags := map[string]struct {
        clear bool
        flag  int
    }{
        "defaults":      {false, 0},
        "ro":            {false, RDONLY},
        "rw":            {true, RDONLY},
        "suid":          {true, NOSUID},
        "nosuid":        {false, NOSUID},
        "dev":           {true, NODEV},
        "nodev":         {false, NODEV},
        "exec":          {true, NOEXEC},
        "noexec":        {false, NOEXEC},
        "sync":          {false, SYNCHRONOUS},
        "async":         {true, SYNCHRONOUS},
        "dirsync":       {false, DIRSYNC},
        "remount":       {false, REMOUNT},
        "mand":          {false, MANDLOCK},
        "nomand":        {true, MANDLOCK},
        "atime":         {true, NOATIME},
        "noatime":       {false, NOATIME},
        "diratime":      {true, NODIRATIME},
        "nodiratime":    {false, NODIRATIME},
        "bind":          {false, BIND},
        "rbind":         {false, RBIND},
        "private":       {false, PRIVATE},
        "relatime":      {false, RELATIME},
        "norelatime":    {true, RELATIME},
        "strictatime":   {false, STRICTATIME},
        "nostrictatime": {true, STRICTATIME},
    }

    for _, o := range strings.Split(options, ",") {
        // If the option does not exist in the flags table or the flag
        // is not supported on the platform,
        // then it is a data value for a specific fs type
        if f, exists := flags[o]; exists && f.flag != 0 {
            if f.clear {
                flag &= ^f.flag
            } else {
                flag |= f.flag
            }
        } else {
            data = append(data, o)
        }
    }
    return flag, strings.Join(data, ",")
}
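parseOptions turns a comma-separated, fstab-style option string into a bitmask of the flag constants plus any leftover filesystem-specific data. A minimal sketch of the expected split, written as if it lived in the same package (hypothetical helper, not part of the vendored file; the values mirror the case exercised in mount_test.go below):

package mount

import "fmt"

// demoParseOptions is a hypothetical helper, not part of the vendored sources.
// It shows how option strings split into flag bits and leftover data.
func demoParseOptions() {
    flag, data := parseOptions("noatime,ro,size=10k")
    fmt.Println(flag == (NOATIME | RDONLY)) // true on Linux: both options map to flag bits
    fmt.Println(data)                       // "size=10k" is passed through as fs-specific data
}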
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go  (generated, vendored, new file, 28 lines)
@@ -0,0 +1,28 @@

// +build freebsd,cgo

package mount

/*
#include <sys/mount.h>
*/
import "C"

const (
    RDONLY      = C.MNT_RDONLY
    NOSUID      = C.MNT_NOSUID
    NOEXEC      = C.MNT_NOEXEC
    SYNCHRONOUS = C.MNT_SYNCHRONOUS
    NOATIME     = C.MNT_NOATIME

    BIND        = 0
    DIRSYNC     = 0
    MANDLOCK    = 0
    NODEV       = 0
    NODIRATIME  = 0
    PRIVATE     = 0
    RBIND       = 0
    RELATIVE    = 0
    RELATIME    = 0
    REMOUNT     = 0
    STRICTATIME = 0
)
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go  (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@

// +build amd64

package mount

import (
    "syscall"
)

const (
    RDONLY      = syscall.MS_RDONLY
    NOSUID      = syscall.MS_NOSUID
    NODEV       = syscall.MS_NODEV
    NOEXEC      = syscall.MS_NOEXEC
    SYNCHRONOUS = syscall.MS_SYNCHRONOUS
    DIRSYNC     = syscall.MS_DIRSYNC
    REMOUNT     = syscall.MS_REMOUNT
    MANDLOCK    = syscall.MS_MANDLOCK
    NOATIME     = syscall.MS_NOATIME
    NODIRATIME  = syscall.MS_NODIRATIME
    BIND        = syscall.MS_BIND
    RBIND       = syscall.MS_BIND | syscall.MS_REC
    PRIVATE     = syscall.MS_PRIVATE
    RELATIME    = syscall.MS_RELATIME
    STRICTATIME = syscall.MS_STRICTATIME
)
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go  (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@

// +build !linux,!freebsd linux,!amd64 freebsd,!cgo

package mount

const (
    BIND        = 0
    DIRSYNC     = 0
    MANDLOCK    = 0
    NOATIME     = 0
    NODEV       = 0
    NODIRATIME  = 0
    NOEXEC      = 0
    NOSUID      = 0
    PRIVATE     = 0
    RBIND       = 0
    RELATIME    = 0
    RELATIVE    = 0
    REMOUNT     = 0
    STRICTATIME = 0
    SYNCHRONOUS = 0
    RDONLY      = 0
)
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go  (generated, vendored, new file, 70 lines)
@@ -0,0 +1,70 @@

package mount

import (
    "time"
)

func GetMounts() ([]*MountInfo, error) {
    return parseMountTable()
}

// Looks at /proc/self/mountinfo to determine of the specified
// mountpoint has been mounted
func Mounted(mountpoint string) (bool, error) {
    entries, err := parseMountTable()
    if err != nil {
        return false, err
    }

    // Search the table for the mountpoint
    for _, e := range entries {
        if e.Mountpoint == mountpoint {
            return true, nil
        }
    }
    return false, nil
}

// Mount the specified options at the target path only if
// the target is not mounted
// Options must be specified as fstab style
func Mount(device, target, mType, options string) error {
    flag, _ := parseOptions(options)
    if flag&REMOUNT != REMOUNT {
        if mounted, err := Mounted(target); err != nil || mounted {
            return err
        }
    }
    return ForceMount(device, target, mType, options)
}

// Mount the specified options at the target path
// reguardless if the target is mounted or not
// Options must be specified as fstab style
func ForceMount(device, target, mType, options string) error {
    flag, data := parseOptions(options)
    if err := mount(device, target, mType, uintptr(flag), data); err != nil {
        return err
    }
    return nil
}

// Unmount the target only if it is mounted
func Unmount(target string) error {
    if mounted, err := Mounted(target); err != nil || !mounted {
        return err
    }
    return ForceUnmount(target)
}

// Unmount the target reguardless if it is mounted or not
func ForceUnmount(target string) (err error) {
    // Simple retry logic for unmount
    for i := 0; i < 10; i++ {
        if err = unmount(target, 0); err == nil {
            return nil
        }
        time.Sleep(100 * time.Millisecond)
    }
    return
}
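mount.go is the public surface of the vendored package: Mounted consults the mount table, Mount parses fstab-style options and mounts only when needed, and Unmount/ForceUnmount tear the mount down. A minimal usage sketch, not part of this commit (requires root on Linux, and the placeholder directories must already exist), mirroring what mount_test.go below does:

package main

import (
    "log"

    "github.com/docker/docker/pkg/mount"
)

func main() {
    // Placeholder directories; both must already exist.
    src, dst := "/tmp/mount-src", "/tmp/mount-dst"

    // Bind-mount src onto dst unless dst is already a mountpoint.
    if err := mount.Mount(src, dst, "none", "bind,rw"); err != nil {
        log.Fatal(err)
    }
    defer mount.Unmount(dst)

    mounted, err := mount.Mounted(dst)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("%s mounted: %v", dst, mounted)
}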
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go  (generated, vendored, new file, 137 lines)
@@ -0,0 +1,137 @@

package mount

import (
    "os"
    "path"
    "testing"
)

func TestMountOptionsParsing(t *testing.T) {
    options := "noatime,ro,size=10k"

    flag, data := parseOptions(options)

    if data != "size=10k" {
        t.Fatalf("Expected size=10 got %s", data)
    }

    expectedFlag := NOATIME | RDONLY

    if flag != expectedFlag {
        t.Fatalf("Expected %d got %d", expectedFlag, flag)
    }
}

func TestMounted(t *testing.T) {
    tmp := path.Join(os.TempDir(), "mount-tests")
    if err := os.MkdirAll(tmp, 0777); err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)

    var (
        sourceDir  = path.Join(tmp, "source")
        targetDir  = path.Join(tmp, "target")
        sourcePath = path.Join(sourceDir, "file.txt")
        targetPath = path.Join(targetDir, "file.txt")
    )

    os.Mkdir(sourceDir, 0777)
    os.Mkdir(targetDir, 0777)

    f, err := os.Create(sourcePath)
    if err != nil {
        t.Fatal(err)
    }
    f.WriteString("hello")
    f.Close()

    f, err = os.Create(targetPath)
    if err != nil {
        t.Fatal(err)
    }
    f.Close()

    if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := Unmount(targetDir); err != nil {
            t.Fatal(err)
        }
    }()

    mounted, err := Mounted(targetDir)
    if err != nil {
        t.Fatal(err)
    }
    if !mounted {
        t.Fatalf("Expected %s to be mounted", targetDir)
    }
    if _, err := os.Stat(targetDir); err != nil {
        t.Fatal(err)
    }
}

func TestMountReadonly(t *testing.T) {
    tmp := path.Join(os.TempDir(), "mount-tests")
    if err := os.MkdirAll(tmp, 0777); err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)

    var (
        sourceDir  = path.Join(tmp, "source")
        targetDir  = path.Join(tmp, "target")
        sourcePath = path.Join(sourceDir, "file.txt")
        targetPath = path.Join(targetDir, "file.txt")
    )

    os.Mkdir(sourceDir, 0777)
    os.Mkdir(targetDir, 0777)

    f, err := os.Create(sourcePath)
    if err != nil {
        t.Fatal(err)
    }
    f.WriteString("hello")
    f.Close()

    f, err = os.Create(targetPath)
    if err != nil {
        t.Fatal(err)
    }
    f.Close()

    if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := Unmount(targetDir); err != nil {
            t.Fatal(err)
        }
    }()

    f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
    if err == nil {
        t.Fatal("Should not be able to open a ro file as rw")
    }
}

func TestGetMounts(t *testing.T) {
    mounts, err := GetMounts()
    if err != nil {
        t.Fatal(err)
    }

    root := false
    for _, entry := range mounts {
        if entry.Mountpoint == "/" {
            root = true
        }
    }

    if !root {
        t.Fatal("/ should be mounted at least")
    }
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go  (generated, vendored, new file, 59 lines)
@@ -0,0 +1,59 @@

package mount

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
    "fmt"
    "strings"
    "syscall"
    "unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
    out := make([]C.struct_iovec, len(options))
    for i, option := range options {
        out[i].iov_base = unsafe.Pointer(C.CString(option))
        out[i].iov_len = C.size_t(len(option) + 1)
    }
    return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
    isNullFS := false

    xs := strings.Split(data, ",")
    for _, x := range xs {
        if x == "bind" {
            isNullFS = true
        }
    }

    options := []string{"fspath", target}
    if isNullFS {
        options = append(options, "fstype", "nullfs", "target", device)
    } else {
        options = append(options, "fstype", mType, "from", device)
    }
    rawOptions := allocateIOVecs(options)
    for _, rawOption := range rawOptions {
        defer C.free(rawOption.iov_base)
    }

    if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
        reason := C.GoString(C.strerror(*C.__error()))
        return fmt.Errorf("Failed to call nmount: %s", reason)
    }
    return nil
}

func unmount(target string, flag int) error {
    return syscall.Unmount(target, flag)
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go  (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@

// +build amd64

package mount

import (
    "syscall"
)

func mount(device, target, mType string, flag uintptr, data string) error {
    if err := syscall.Mount(device, target, mType, flag, data); err != nil {
        return err
    }

    // If we have a bind mount or remount, remount...
    if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
        return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
    }
    return nil
}

func unmount(target string, flag int) error {
    return syscall.Unmount(target, flag)
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go  (generated, vendored, new file, 11 lines)
@@ -0,0 +1,11 @@

// +build !linux,!freebsd linux,!amd64 freebsd,!cgo

package mount

func mount(device, target, mType string, flag uintptr, data string) error {
    panic("Not implemented")
}

func unmount(target string, flag int) error {
    panic("Not implemented")
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go  (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@

package mount

type MountInfo struct {
    Id, Parent, Major, Minor int
    Root, Mountpoint, Opts   string
    Fstype, Source, VfsOpts  string
}
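MountInfo is the parsed form of one mount-table record; GetMounts in mount.go above returns one entry per mount. A small sketch, not part of this commit, showing how the fields might be inspected:

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/mount"
)

func main() {
    infos, err := mount.GetMounts()
    if err != nil {
        log.Fatal(err)
    }
    // Print the mountpoint, filesystem type, and options of every entry.
    for _, m := range infos {
        fmt.Printf("%-30s %-10s %s\n", m.Mountpoint, m.Fstype, m.Opts)
    }
}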
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go  (generated, vendored, new file, 38 lines)
@@ -0,0 +1,38 @@

package mount

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
    "fmt"
    "reflect"
    "unsafe"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
func parseMountTable() ([]*MountInfo, error) {
    var rawEntries *C.struct_statfs

    count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
    if count == 0 {
        return nil, fmt.Errorf("Failed to call getmntinfo")
    }

    var entries []C.struct_statfs
    header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
    header.Cap = count
    header.Len = count
    header.Data = uintptr(unsafe.Pointer(rawEntries))

    var out []*MountInfo
    for _, entry := range entries {
        var mountinfo MountInfo
        mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
        out = append(out, &mountinfo)
    }
    return out, nil
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go  (generated, vendored, new file, 73 lines)
@@ -0,0 +1,73 @@

package mount

import (
    "bufio"
    "fmt"
    "io"
    "os"
    "strings"
)

const (
    /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
       (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)

       (1) mount ID:  unique identifier of the mount (may be reused after umount)
       (2) parent ID:  ID of parent (or of self for the top of the mount tree)
       (3) major:minor:  value of st_dev for files on filesystem
       (4) root:  root of the mount within the filesystem
       (5) mount point:  mount point relative to the process's root
       (6) mount options:  per mount options
       (7) optional fields:  zero or more fields of the form "tag[:value]"
       (8) separator:  marks the end of the optional fields
       (9) filesystem type:  name of filesystem of the form "type[.subtype]"
       (10) mount source:  filesystem specific information or "none"
       (11) super options:  per super block options*/
    mountinfoFormat = "%d %d %d:%d %s %s %s "
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
func parseMountTable() ([]*MountInfo, error) {
    f, err := os.Open("/proc/self/mountinfo")
    if err != nil {
        return nil, err
    }
    defer f.Close()

    return parseInfoFile(f)
}

func parseInfoFile(r io.Reader) ([]*MountInfo, error) {
    var (
        s   = bufio.NewScanner(r)
        out = []*MountInfo{}
    )

    for s.Scan() {
        if err := s.Err(); err != nil {
            return nil, err
        }

        var (
            p    = &MountInfo{}
            text = s.Text()
        )

        if _, err := fmt.Sscanf(text, mountinfoFormat,
            &p.Id, &p.Parent, &p.Major, &p.Minor,
            &p.Root, &p.Mountpoint, &p.Opts); err != nil {
            return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
        }
        // Safe as mountinfo encodes mountpoints with spaces as \040.
        index := strings.Index(text, " - ")
        postSeparatorFields := strings.Fields(text[index+3:])
        if len(postSeparatorFields) != 3 {
            return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text)
        }
        p.Fstype = postSeparatorFields[0]
        p.Source = postSeparatorFields[1]
        p.VfsOpts = postSeparatorFields[2]
        out = append(out, p)
    }
    return out, nil
}
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_test_linux.go  (generated, vendored, new file, 445 lines)
@@ -0,0 +1,445 @@

package mount

import (
    "bytes"
    "testing"
)

const (
fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
|
||||
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
|
||||
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
|
||||
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
|
||||
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
|
||||
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
|
||||
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
|
||||
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
|
||||
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
|
||||
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
|
||||
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
|
||||
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
|
||||
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
|
||||
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
|
||||
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
|
||||
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
|
||||
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
|
||||
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
|
||||
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
|
||||
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
|
||||
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
|
||||
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
|
||||
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
|
||||
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
|
||||
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
|
||||
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
|
||||
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
|
||||
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
|
||||
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
|
||||
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
|
||||
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
|
||||
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
|
||||
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
|
||||
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
|
||||
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
|
||||
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
|
||||
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
|
||||
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
|
||||
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
|
||||
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
|
||||
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
|
||||
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
|
||||
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
|
||||
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
|
||||
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
|
||||
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
|
||||
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
|
||||
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
|
||||
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
|
||||
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
|
||||
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
|
||||
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
|
||||
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered`
|
||||
|
||||
ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
|
||||
16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
|
||||
17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755
|
||||
18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
|
||||
19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755
|
||||
20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered
|
||||
21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755
|
||||
22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
|
||||
23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
|
||||
24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
|
||||
25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
|
||||
26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children
|
||||
27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
|
||||
28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
|
||||
29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
|
||||
30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw
|
||||
31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
|
||||
32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
|
||||
33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
|
||||
34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
|
||||
35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
|
||||
36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
|
||||
37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb
|
||||
38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
|
||||
39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525
|
||||
40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525
|
||||
41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525
|
||||
42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525
|
||||
43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525
|
||||
44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525
|
||||
45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525
|
||||
46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525
|
||||
47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525
|
||||
48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525
|
||||
49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525
|
||||
50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525
|
||||
51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525
|
||||
52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525
|
||||
53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525
|
||||
54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525
|
||||
55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525
|
||||
56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525
|
||||
57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525
|
||||
58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525
|
||||
59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525
|
||||
60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525
|
||||
61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525
|
||||
62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525
|
||||
63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525
|
||||
64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525
|
||||
65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525
|
||||
66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525
|
||||
67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525
|
||||
68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525
|
||||
69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525
|
||||
70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525
|
||||
71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525
|
||||
72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525
|
||||
73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525
|
||||
74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525
|
||||
75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525
|
||||
76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525
|
||||
77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525
|
||||
78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525
|
||||
79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525
|
||||
80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525
|
||||
81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525
|
||||
82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525
|
||||
83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525
|
||||
84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525
|
||||
85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525
|
||||
86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525
|
||||
87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525
|
||||
88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525
|
||||
89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525
|
||||
90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525
|
||||
91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525
|
||||
92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525
|
||||
93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525
|
||||
94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525
|
||||
95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525
|
||||
96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525
|
||||
97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525
|
||||
98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525
|
||||
99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525
|
||||
100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525
|
||||
101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525
|
||||
102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525
|
||||
103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525
|
||||
104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525
|
||||
105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525
|
||||
106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525
|
||||
107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525
|
||||
108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525
|
||||
109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525
|
||||
110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525
|
||||
111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525
|
||||
112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525
|
||||
113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525
|
||||
114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525
|
||||
115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525
|
||||
116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525
|
||||
117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525
|
||||
118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525
|
||||
119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525
|
||||
120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525
|
||||
121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525
|
||||
122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525
|
||||
123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525
|
||||
124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525
|
||||
125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525
|
||||
126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525
|
||||
127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525
|
||||
128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525
|
||||
129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525
|
||||
130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525
|
||||
131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525
|
||||
132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525
|
||||
133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525
|
||||
134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525
|
||||
135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525
|
||||
136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525
|
||||
137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525
|
||||
138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525
|
||||
139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525
|
||||
140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525
|
||||
141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525
|
||||
142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525
|
||||
143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525
|
||||
144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525`
|
||||
|
||||
gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
|
||||
17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755
|
||||
18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755
|
||||
19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
|
||||
20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
|
||||
21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw
|
||||
22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
|
||||
23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw
|
||||
24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755
|
||||
25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc
|
||||
26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children
|
||||
27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children
|
||||
28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children
|
||||
29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children
|
||||
30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children
|
||||
31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children
|
||||
32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children
|
||||
33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro
|
||||
34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota
|
||||
35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw
|
||||
36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
|
||||
42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw
|
||||
43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw
|
||||
44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000
|
||||
68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c
|
||||
85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c
|
||||
39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c
|
||||
40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c
|
||||
41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c
|
||||
45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c
|
||||
46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c
|
||||
47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c
|
||||
48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c
|
||||
49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c
|
||||
50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c
|
||||
51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c
|
||||
52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c
|
||||
53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c
|
||||
54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c
|
||||
55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c
|
||||
56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c
|
||||
57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c
|
||||
59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c
|
||||
60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c
|
||||
61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c
|
||||
62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c
|
||||
63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c
|
||||
64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c
|
||||
65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c
|
||||
66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c
|
||||
70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c
|
||||
71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c
|
||||
72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c
|
||||
73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c
|
||||
76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c
|
||||
77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c
|
||||
78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c
|
||||
79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c
|
||||
80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c
|
||||
81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c
|
||||
82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c
|
||||
83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c
|
||||
84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c
|
||||
94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c
|
||||
95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c
|
||||
96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c
|
||||
97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c
|
||||
98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c
|
||||
102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c
|
||||
103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c
|
||||
104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c
|
||||
105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c
|
||||
106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c
|
||||
107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c
|
||||
108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c
|
||||
109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c
|
||||
110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c
|
||||
111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c
|
||||
112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c
|
||||
113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c
|
||||
114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c
|
||||
117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c
|
||||
118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c
|
||||
119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c
|
||||
120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c
|
||||
121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c
|
||||
122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c
|
||||
123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c
|
||||
126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c
|
||||
127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c
|
||||
128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c
|
||||
130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c
|
||||
131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c
|
||||
132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c
|
||||
133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c
|
||||
134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c
|
||||
135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c
|
||||
136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c
|
||||
137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c
|
||||
138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c
|
||||
139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c
|
||||
140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c
|
||||
141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c
|
||||
142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c
|
||||
143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c
|
||||
144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c
|
||||
147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c
|
||||
150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c
|
||||
151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c
|
||||
152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c
|
||||
153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c
|
||||
154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c
|
||||
155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c
|
||||
156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c
|
||||
157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c
|
||||
158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c
|
||||
159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c
|
||||
160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c
|
||||
162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c
|
||||
163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c
|
||||
164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c
|
||||
165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c
|
||||
166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c
|
||||
167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c
|
||||
168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c
|
||||
169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c
|
||||
170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c
|
||||
171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c
|
||||
172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c
|
||||
173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c
|
||||
174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c
|
||||
184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c
|
||||
187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c
|
||||
188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c
|
||||
189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c
|
||||
190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c
|
||||
191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c
|
||||
192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c
|
||||
193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c
|
||||
194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c
|
||||
195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c
|
||||
196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c
|
||||
197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c
|
||||
198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c
|
||||
199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c
|
||||
200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c
|
||||
201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c
|
||||
202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c
|
||||
203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c
|
||||
204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c
|
||||
205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c
|
||||
206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c
|
||||
207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c
|
||||
208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c
|
||||
209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c
|
||||
210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c
|
||||
211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c
|
||||
212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c
|
||||
213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c
|
||||
214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c
|
||||
215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c
|
||||
216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c
|
||||
217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c
|
||||
218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c
|
||||
219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c
|
||||
220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c
|
||||
221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c
|
||||
222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c
|
||||
223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c
|
||||
224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c
|
||||
225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c
|
||||
226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c
|
||||
227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c
|
||||
228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c
|
||||
229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c
|
||||
230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c
|
||||
231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c
|
||||
232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c
|
||||
233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c
|
||||
234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c
|
||||
235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c
|
||||
237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c
|
||||
238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c
|
||||
239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c
|
||||
240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c
|
||||
241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c
|
||||
242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c
|
||||
243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c
|
||||
244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c
|
||||
245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c
|
||||
246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c
|
||||
247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c
|
||||
249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c
|
||||
250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c
|
||||
251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c
|
||||
252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c
|
||||
253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c
|
||||
254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c
|
||||
255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c
|
||||
256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c
|
||||
257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c
|
||||
259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c
|
||||
260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c
|
||||
261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
|
||||
262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
|
||||
263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
|
||||
264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
|
||||
58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
|
||||
67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
|
||||
265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
|
||||
270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
|
||||
273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
|
||||
278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
|
||||
281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
|
||||
286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
|
||||
289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
|
||||
99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
)
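
Each fixture line above follows the mountinfo format described in proc(5): fixed fields, zero or more optional fields, a lone `-` separator, then the filesystem type, source and per-superblock options. As a hedged illustration only (the struct and function names below are invented for this sketch and are not the package's own), the fields break down like this:

```go
package mount

import "strings"

// mountinfoFields is an illustrative decomposition of one mountinfo line;
// it is not necessarily the struct this package builds internally.
type mountinfoFields struct {
	ID, Parent, MajorMinor    string
	Root, Mountpoint, Options string
	Optional                  []string // e.g. "master:1"; empty in the fixtures above
	Fstype, Source, VfsOpts   string
}

// splitMountinfoLine assumes a well-formed line such as the fixtures above
// (no bounds checking): fixed fields, optional fields, a lone "-", then
// fstype, source and per-superblock options.
func splitMountinfoLine(line string) mountinfoFields {
	f := strings.Fields(line)
	e := mountinfoFields{
		ID: f[0], Parent: f[1], MajorMinor: f[2],
		Root: f[3], Mountpoint: f[4], Options: f[5],
	}
	i := 6
	for ; f[i] != "-"; i++ {
		e.Optional = append(e.Optional, f[i])
	}
	e.Fstype, e.Source, e.VfsOpts = f[i+1], f[i+2], f[i+3]
	return e
}
```

Run over one of the `aufs` lines above, this yields `Fstype: "aufs"`, `Source: "none"` and the `si=` identifier in `VfsOpts`.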

func TestParseFedoraMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(fedoraMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseUbuntuMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseGentooMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(gentooMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}
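
The three distro tests above differ only in which fixture they feed to `parseInfoFile`. If further fixtures were ever added, a table-driven variant would keep the file from growing; this is a sketch rather than part of the original change, and it assumes only the identifiers already visible in this file:

```go
func TestParseDistroMountinfo(t *testing.T) {
	// Keys are labels for error messages; values are the fixture constants
	// defined earlier in this file.
	fixtures := map[string]string{
		"fedora": fedoraMountinfo,
		"ubuntu": ubuntuMountInfo,
		"gentoo": gentooMountinfo,
	}
	for name, data := range fixtures {
		if _, err := parseInfoFile(bytes.NewBufferString(data)); err != nil {
			t.Fatalf("%s: unexpected parse error: %v", name, err)
		}
	}
}
```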
|
12
Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

import (
	"fmt"
	"runtime"
)

func parseMountTable() ([]*MountInfo, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
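
For contrast, a hedged sketch of what the supported (Linux) side of this build pair usually looks like; the signature is taken from the stub above, but the file name and body are illustrative rather than a quote of the vendored source:

```go
// +build linux

package mount

import "os"

// parseMountTable reads the calling process's mount table and hands it to
// the same text parser the tests above exercise.
func parseMountTable() ([]*MountInfo, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return parseInfoFile(f)
}
```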
|
34
Godeps/_workspace/src/github.com/docker/libcontainer/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,34 @@
language: go
go: 1.3

# let us have pretty experimental Docker-based Travis workers
sudo: false

env:
  - TRAVIS_GLOBAL_WTF=1
  - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1
  - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0
# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061)
  - _GOOS=linux _GOARCH=386 CGO_ENABLED=0
  - _GOOS=linux _GOARCH=arm CGO_ENABLED=0

install:
  - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer"
  - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then
      gvm cross "$_GOOS" "$_GOARCH";
      export GOOS="$_GOOS" GOARCH="$_GOARCH";
    fi
  - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi
  - go get -d -v ./...
  - if [ "$TRAVIS_GLOBAL_WTF" ]; then
      export DOCKER_PATH="${GOPATH%%:*}/src/github.com/dotcloud/docker";
      mkdir -p "$DOCKER_PATH/hack/make";
      ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/dotcloud/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} );
      sed -i 's!dotcloud/docker!docker/libcontainer!' "$DOCKER_PATH/hack/make/.validate";
    fi

script:
  - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi
  - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi
  - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go build -v ./...; fi
  - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then go test -test.short -v ./...; fi
|
204
Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
# The libcontainer Contributors' Guide
|
||||
|
||||
Want to hack on libcontainer? Awesome! Here are instructions to get you
|
||||
started. They are probably not perfect, please let us know if anything
|
||||
feels wrong or incomplete.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
When reporting [issues](https://github.com/docker/libcontainer/issues)
on GitHub, please include your host OS (Ubuntu 12.04, Fedora 19, etc.)
and the output of `uname -a`. If possible and applicable, please also
include the steps required to reproduce the problem.
This information will help us review and fix your issue faster.
|
||||
|
||||
## Development Environment
|
||||
|
||||
*Add instructions on setting up the development environment.*
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
### Pull requests are always welcome
|
||||
|
||||
We are always thrilled to receive pull requests, and do our best to
|
||||
process them as fast as possible. Not sure if that typo is worth a pull
|
||||
request? Do it! We will appreciate it.
|
||||
|
||||
If your pull request is not accepted on the first try, don't be
|
||||
discouraged! If there's a problem with the implementation, hopefully you
|
||||
received feedback on what to improve.
|
||||
|
||||
We're trying very hard to keep libcontainer lean and focused. We don't want it
|
||||
to do everything for everybody. This means that we might decide against
|
||||
incorporating a new feature. However, there might be a way to implement
|
||||
that feature *on top of* libcontainer.
|
||||
|
||||
### Discuss your design on the mailing list
|
||||
|
||||
We recommend discussing your plans [on the mailing
|
||||
list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer)
|
||||
before starting to code - especially for more ambitious contributions.
|
||||
This gives other contributors a chance to point you in the right
|
||||
direction, give feedback on your design, and maybe point out if someone
|
||||
else is working on the same thing.
|
||||
|
||||
### Create issues...
|
||||
|
||||
Any significant improvement should be documented as [a GitHub
|
||||
issue](https://github.com/docker/libcontainer/issues) before anybody
|
||||
starts working on it.
|
||||
|
||||
### ...but check for existing issues first!
|
||||
|
||||
Please take a moment to check that an issue doesn't already exist
|
||||
documenting your bug report or improvement proposal. If it does, it
|
||||
never hurts to add a quick "+1" or "I have this problem too". This will
|
||||
help prioritize the most common problems and requests.
|
||||
|
||||
### Conventions
|
||||
|
||||
Fork the repo and make changes on your fork in a feature branch:
|
||||
|
||||
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
|
||||
issue
|
||||
- If it's a feature branch, create an enhancement issue to announce your
|
||||
intentions, and name it XXX-something where XXX is the number of the issue.
|
||||
|
||||
Submit unit tests for your changes. Go has a great test framework built in; use
|
||||
it! Take a look at existing tests for inspiration. Run the full test suite on
|
||||
your branch before submitting a pull request.
|
||||
|
||||
Update the documentation when creating or modifying features. Test
|
||||
your documentation changes for clarity, concision, and correctness, as
|
||||
well as a clean documentation build. See ``docs/README.md`` for more
|
||||
information on building the docs and how docs get released.
|
||||
|
||||
Write clean code. Universally formatted code promotes ease of writing, reading,
|
||||
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
|
||||
committing your changes. Most editors have plugins that do this automatically.
|
||||
|
||||
Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address.
|
||||
|
||||
Pull requests must not contain commits from other users or branches.
|
||||
|
||||
Commit messages must start with a capitalized and short summary (max. 50
|
||||
chars) written in the imperative, followed by an optional, more detailed
|
||||
explanatory text which is separated from the summary by an empty line.
|
||||
|
||||
Code review comments may be added to your pull request. Discuss, then make the
|
||||
suggested modifications and push additional commits to your feature branch. Be
|
||||
sure to post a comment after pushing. The new commits will show up in the pull
|
||||
request automatically, but the reviewers will not be notified unless you
|
||||
comment.
|
||||
|
||||
Before the pull request is merged, make sure that you squash your commits into
|
||||
logical units of work using `git rebase -i` and `git push -f`. After every
|
||||
commit the test suite should be passing. Include documentation changes in the
|
||||
same commit so that a revert would remove all traces of the feature or fix.
|
||||
|
||||
Commits that fix or close an issue should include a reference like `Closes #XXX`
|
||||
or `Fixes #XXX`, which will automatically close the issue when merged.
|
||||
|
||||
### Testing
|
||||
|
||||
Make sure you include suitable tests, preferably unit tests, in your pull request
|
||||
and that all the tests pass.
|
||||
|
||||
*Instructions for running tests to be added.*
|
||||
|
||||
### Merge approval
|
||||
|
||||
libcontainer maintainers use LGTM (looks good to me) in comments on the code review
|
||||
to indicate acceptance.
|
||||
|
||||
A change requires LGTMs from at least two maintainers. One of those must come from
|
||||
a maintainer of the component affected. For example, if a change affects `netlink/`
|
||||
and `security`, it needs at least one LGTM from a maintainer of each. Maintainers
|
||||
only need one LGTM as presumably they LGTM their own change.
|
||||
|
||||
For more details see [MAINTAINERS.md](MAINTAINERS.md)
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the
|
||||
patch, which certifies that you wrote it or otherwise have the right to
|
||||
pass it on as an open-source patch. The rules are pretty simple: if you
|
||||
can certify the below (from
|
||||
[developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
then you just add a line to every git commit message:
|
||||
|
||||
Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
|
||||
|
||||
using your real name (sorry, no pseudonyms or anonymous contributions).
|
||||
|
||||
One way to automate this is to customise your git ``commit.template`` by adding
a ``prepare-commit-msg`` hook to your libcontainer checkout:
|
||||
|
||||
```
|
||||
curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
|
||||
```
|
||||
|
||||
* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
|
||||
|
||||
#### Small patch exception
|
||||
|
||||
There are several exceptions to the signing requirement. Currently these are:
|
||||
|
||||
* Your patch fixes spelling or grammar errors.
|
||||
* Your patch is a single line change to documentation contained in the
|
||||
`docs` directory.
|
||||
* Your patch fixes Markdown formatting or syntax errors in the
|
||||
documentation contained in the `docs` directory.
|
||||
|
||||
If you have any questions, please refer to the FAQ in the [docs](to be written)
|
||||
|
||||
### How can I become a maintainer?
|
||||
|
||||
* Step 1: learn the component inside out
|
||||
* Step 2: make yourself useful by contributing code, bugfixes, support etc.
|
||||
* Step 3: volunteer on the irc channel (#libcontainer@freenode)
|
||||
|
||||
Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
|
||||
You don't have to be a maintainer to make a difference on the project!
|
||||
|
21
Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,21 @@
FROM crosbymichael/golang

RUN apt-get update && apt-get install -y gcc
RUN go get code.google.com/p/go.tools/cmd/cover

# setup a playground for us to spawn containers in
RUN mkdir /busybox && \
    curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox

RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \
    chmod +x /dind

COPY . /go/src/github.com/docker/libcontainer
WORKDIR /go/src/github.com/docker/libcontainer
RUN cp sample_configs/minimal.json /busybox/container.json

RUN go get -d -v ./...
RUN go install -v ./...

ENTRYPOINT ["/dind"]
CMD ["go", "test", "-cover", "./..."]
|
191
Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2014 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
4
Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
Michael Crosby <michael@docker.com> (@crosbymichael)
Rohit Jnagal <jnagal@google.com> (@rjnagal)
Victor Marmol <vmarmol@google.com> (@vmarmol)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
99
Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
# The libcontainer Maintainers' Guide
|
||||
|
||||
## Introduction
|
||||
|
||||
Dear maintainer. Thank you for investing the time and energy to help
|
||||
make libcontainer as useful as possible. Maintaining a project is difficult,
|
||||
sometimes unrewarding work. Sure, you will get to contribute cool
|
||||
features to the project. But most of your time will be spent reviewing,
|
||||
cleaning up, documenting, answering questions, justifying design
|
||||
decisions - while everyone has all the fun! But remember - the quality
|
||||
of the maintainers' work is what distinguishes the good projects from the
|
||||
great. So please be proud of your work, even the unglamourous parts,
|
||||
and encourage a culture of appreciation and respect for *every* aspect
|
||||
of improving the project - not just the hot new features.
|
||||
|
||||
This document is a manual for maintainers old and new. It explains what
|
||||
is expected of maintainers, how they should work, and what tools are
|
||||
available to them.
|
||||
|
||||
This is a living document - if you see something out of date or missing,
|
||||
speak up!
|
||||
|
||||
## What are a maintainer's responsibilities?
|
||||
|
||||
It is every maintainer's responsibility to:
|
||||
|
||||
* 1) Expose a clear roadmap for improving their component.
|
||||
* 2) Deliver prompt feedback and decisions on pull requests.
|
||||
* 3) Be available to anyone with questions, bug reports, criticism etc.
|
||||
on their component. This includes IRC, GitHub requests and the mailing
|
||||
list.
|
||||
* 4) Make sure their component respects the philosophy, design and
|
||||
roadmap of the project.
|
||||
|
||||
## How are decisions made?
|
||||
|
||||
Short answer: with pull requests to the libcontainer repository.
|
||||
|
||||
libcontainer is an open-source project with an open design philosophy. This
|
||||
means that the repository is the source of truth for EVERY aspect of the
|
||||
project, including its philosophy, design, roadmap and APIs. *If it's
|
||||
part of the project, it's in the repo. If it's in the repo, it's part of
|
||||
the project.*
|
||||
|
||||
As a result, all decisions can be expressed as changes to the
|
||||
repository. An implementation change is a change to the source code. An
|
||||
API change is a change to the API specification. A philosophy change is
|
||||
a change to the philosophy manifesto. And so on.
|
||||
|
||||
All decisions affecting libcontainer, big and small, follow the same 3 steps:
|
||||
|
||||
* Step 1: Open a pull request. Anyone can do this.
|
||||
|
||||
* Step 2: Discuss the pull request. Anyone can do this.
|
||||
|
||||
* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
|
||||
this (see below "Who decides what?")
|
||||
|
||||
|
||||
## Who decides what?
|
||||
|
||||
All decisions are pull requests, and the relevant maintainers make
|
||||
decisions by accepting or refusing the pull request. Review and acceptance
|
||||
by anyone is denoted by adding a comment in the pull request: `LGTM`.
|
||||
However, only currently listed `MAINTAINERS` are counted towards the required
|
||||
two LGTMs.
|
||||
|
||||
libcontainer follows the timeless, highly efficient and totally unfair system
|
||||
known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL.
|
||||
This means that all decisions are made by default by Michael. Since making
|
||||
every decision himself would be highly un-scalable, in practice decisions
|
||||
are spread across multiple maintainers.
|
||||
|
||||
The relevant maintainers for a pull request can be worked out in two steps:
|
||||
|
||||
* Step 1: Determine the subdirectories affected by the pull request. This
|
||||
might be `netlink/` and `security/`, or any other part of the repo.
|
||||
|
||||
* Step 2: Find the `MAINTAINERS` file which affects this directory. If the
|
||||
directory itself does not have a `MAINTAINERS` file, work your way up
|
||||
the repo hierarchy until you find one.
|
||||
|
||||
### I'm a maintainer, and I'm going on holiday
|
||||
|
||||
Please let your co-maintainers and other contributors know by raising a pull
|
||||
request that comments out your `MAINTAINERS` file entry using a `#`.
|
||||
|
||||
### I'm a maintainer, should I make pull requests too?
|
||||
|
||||
Yes. Nobody should ever push to master directly. All changes should be
|
||||
made through a pull request.
|
||||
|
||||
### Who assigns maintainers?
|
||||
|
||||
Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files.
|
||||
|
||||
### How is this process changed?
|
||||
|
||||
Just like everything else: by making a pull request :)
|
10 Godeps/_workspace/src/github.com/docker/libcontainer/Makefile generated vendored Normal file
@@ -0,0 +1,10 @@

all:
	docker build -t docker/libcontainer .

test:
	# we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting
	docker run --rm --cap-add NET_ADMIN --cap-add SYS_ADMIN docker/libcontainer

sh:
	docker run --rm -ti -w /busybox --rm --cap-add NET_ADMIN --cap-add SYS_ADMIN docker/libcontainer nsinit exec sh
16 Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE generated vendored Normal file
@@ -0,0 +1,16 @@
libcontainer
Copyright 2012-2014 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
19 Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md generated vendored Normal file
@@ -0,0 +1,19 @@
# libcontainer Principles

In the design and development of libcontainer we try to follow these principles:

(Work in progress)

* Don't try to replace every tool. Instead, be an ingredient to improve them.
* Less code is better.
* Fewer components are better. Do you really need to add one more class?
* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code.
* When hesitating between two options, choose the one that is easier to reverse.
* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later.
* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
* The fewer moving parts in a container, the better.
* Don't merge it unless you document it.
* Don't document it unless you can keep it up-to-date.
* Don't merge it unless you test it!
* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
62 Godeps/_workspace/src/github.com/docker/libcontainer/README.md generated vendored Normal file
@@ -0,0 +1,62 @@
## libcontainer - reference implementation for containers

### Note on API changes:

Please bear with us while we work on making the libcontainer API stable and something that we can support long term. We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API.

#### Background

libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.


#### Container
A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system.

libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container.


#### Root file system

A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree.


#### Configuration

A container is initially configured by supplying configuration data when the container is created.


#### nsinit

`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory.

To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container.

See the `sample_configs` folder for examples of what the container configuration should look like.

To execute `/bin/bash` in the current directory as a container just run the following **as root**:
```bash
nsinit exec /bin/bash
```

If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed.

You can identify if a process is running in a container by looking to see if `state.json` is in the root of the directory.

You may also specify an alternate root place where the `container.json` file is read and where the `state.json` file will be saved.

#### Future
See the [roadmap](ROADMAP.md).

## Copyright and license

Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
Docs released under Creative commons.

## Hacking on libcontainer

First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md).

If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md).

If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and
"How can I become a maintainer?" in the Contributors' Guide.
16 Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md generated vendored Normal file
@@ -0,0 +1,16 @@
# libcontainer: what's next?

This document is a high-level overview of where we want to take libcontainer next.
It is a curated selection of planned improvements which are either important, difficult, or both.

For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues).

To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.

## Broader kernel support

Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you're deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you're adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel.

## Cross-architecture support

Our goal is to make libcontainer run everywhere. However currently libcontainer only runs on x86_64 systems. We plan on expanding architecture support, so that libcontainer containers can be created and used on more architectures.
34 Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go generated vendored Normal file
@@ -0,0 +1,34 @@
/*
Temporary API endpoint for libcontainer while the full API is finalized (api.go).
*/
package libcontainer

import (
    "github.com/docker/libcontainer/cgroups/fs"
    "github.com/docker/libcontainer/cgroups/systemd"
    "github.com/docker/libcontainer/network"
)

// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that.
// DEPRECATED: The below portions are only to be used during the transition to the official API.
// Returns all available stats for the given container.
func GetStats(container *Config, state *State) (*ContainerStats, error) {
    var (
        err   error
        stats = &ContainerStats{}
    )

    if systemd.UseSystemd() {
        stats.CgroupStats, err = systemd.GetStats(container.Cgroups)
    } else {
        stats.CgroupStats, err = fs.GetStats(container.Cgroups)
    }

    if err != nil {
        return stats, err
    }

    stats.NetworkStats, err = network.GetStats(&state.NetworkState)

    return stats, err
}
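The temporary `GetStats` endpoint above simply picks the systemd or fs cgroup backend and then appends network stats. A minimal caller sketch, assuming the vendored packages are on the GOPATH and that `Config` and `State` values have already been loaded; both are left as zero-valued placeholders here:

```go
package main

import (
    "fmt"
    "log"

    "github.com/docker/libcontainer"
)

func main() {
    var (
        config libcontainer.Config // assumed to come from a container.json
        state  libcontainer.State  // assumed to come from a state.json
    )

    // GetStats dispatches to the systemd or fs cgroup backend internally.
    stats, err := libcontainer.GetStats(&config, &state)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("cgroup stats: %+v\n", stats.CgroupStats)
}
```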
35 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go generated vendored Normal file
@@ -0,0 +1,35 @@
// +build apparmor,linux

package apparmor

// #cgo LDFLAGS: -lapparmor
// #include <sys/apparmor.h>
// #include <stdlib.h>
import "C"
import (
    "io/ioutil"
    "os"
    "unsafe"
)

func IsEnabled() bool {
    if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
        buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
        return err == nil && len(buf) > 1 && buf[0] == 'Y'
    }
    return false
}

func ApplyProfile(name string) error {
    if name == "" {
        return nil
    }

    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    if _, err := C.aa_change_onexec(cName); err != nil {
        return err
    }
    return nil
}
11 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build !apparmor !linux

package apparmor

func IsEnabled() bool {
    return false
}

func ApplyProfile(name string) error {
    return nil
}
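These two files pair the cgo implementation (behind the `apparmor,linux` build tags) with a no-op fallback, so callers can use one API on every platform. A small sketch of how a caller might guard profile application; the profile name is the `docker-default` one generated by `gen.go`/`setup.go` below:

```go
package main

import (
    "log"

    "github.com/docker/libcontainer/apparmor"
)

func main() {
    if !apparmor.IsEnabled() {
        log.Println("AppArmor is not enabled on this host; skipping profile")
        return
    }
    // ApplyProfile takes effect on the next exec in this process.
    if err := apparmor.ApplyProfile("docker-default"); err != nil {
        log.Fatalf("applying AppArmor profile: %v", err)
    }
}
```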
94 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go generated vendored Normal file
@@ -0,0 +1,94 @@
|
||||
package apparmor
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type data struct {
|
||||
Name string
|
||||
Imports []string
|
||||
InnerImports []string
|
||||
}
|
||||
|
||||
const baseTemplate = `
|
||||
{{range $value := .Imports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
|
||||
{{range $value := .InnerImports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
mount fstype=tmpfs,
|
||||
mount fstype=mqueue,
|
||||
mount fstype=fuse.*,
|
||||
mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
|
||||
mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
|
||||
mount fstype=fusectl -> /sys/fs/fuse/connections/,
|
||||
mount fstype=securityfs -> /sys/kernel/security/,
|
||||
mount fstype=debugfs -> /sys/kernel/debug/,
|
||||
mount fstype=proc -> /proc/,
|
||||
mount fstype=sysfs -> /sys/,
|
||||
|
||||
deny @{PROC}/sys/fs/** wklx,
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/mem rwklx,
|
||||
deny @{PROC}/kmem rwklx,
|
||||
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
|
||||
deny @{PROC}/sys/kernel/*/** wklx,
|
||||
|
||||
deny mount options=(ro, remount) -> /,
|
||||
deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
|
||||
deny mount fstype=devpts,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/efi/efivars/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
}
|
||||
`
|
||||
|
||||
func generateProfile(out io.Writer) error {
|
||||
compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data := &data{
|
||||
Name: "docker-default",
|
||||
}
|
||||
if tuntablesExists() {
|
||||
data.Imports = append(data.Imports, "#include <tunables/global>")
|
||||
} else {
|
||||
data.Imports = append(data.Imports, "@{PROC}=/proc/")
|
||||
}
|
||||
if abstrctionsEsists() {
|
||||
data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
|
||||
}
|
||||
if err := compiled.Execute(out, data); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the tunables/global exist
|
||||
func tuntablesExists() bool {
|
||||
_, err := os.Stat("/etc/apparmor.d/tunables/global")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// check if abstractions/base exist
|
||||
func abstrctionsEsists() bool {
|
||||
_, err := os.Stat("/etc/apparmor.d/abstractions/base")
|
||||
return err == nil
|
||||
}
|
44 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go generated vendored Normal file
@@ -0,0 +1,44 @@
package apparmor

import (
    "fmt"
    "os"
    "os/exec"
    "path"
)

const (
    DefaultProfilePath = "/etc/apparmor.d/docker"
)

func InstallDefaultProfile() error {
    if !IsEnabled() {
        return nil
    }

    // Make sure /etc/apparmor.d exists
    if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil {
        return err
    }

    f, err := os.OpenFile(DefaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }
    if err := generateProfile(f); err != nil {
        f.Close()
        return err
    }
    f.Close()

    cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
    // to use the parser directly we have to make sure we are in the correct
    // dir with the profile
    cmd.Dir = "/etc/apparmor.d"

    output, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
    }
    return nil
}
40 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go generated vendored Normal file
@@ -0,0 +1,40 @@
package cgroups

import (
    "errors"

    "github.com/docker/libcontainer/devices"
)

var (
    ErrNotFound = errors.New("mountpoint not found")
)

type FreezerState string

const (
    Undefined FreezerState = ""
    Frozen    FreezerState = "FROZEN"
    Thawed    FreezerState = "THAWED"
)

type Cgroup struct {
    Name   string `json:"name,omitempty"`
    Parent string `json:"parent,omitempty"` // name of parent cgroup or slice

    AllowAllDevices   bool              `json:"allow_all_devices,omitempty"` // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
    AllowedDevices    []*devices.Device `json:"allowed_devices,omitempty"`
    Memory            int64             `json:"memory,omitempty"`             // Memory limit (in bytes)
    MemoryReservation int64             `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes)
    MemorySwap        int64             `json:"memory_swap,omitempty"`        // Total memory usage (memory + swap); set `-1' to disable swap
    CpuShares         int64             `json:"cpu_shares,omitempty"`         // CPU shares (relative weight vs. other containers)
    CpuQuota          int64             `json:"cpu_quota,omitempty"`          // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
    CpuPeriod         int64             `json:"cpu_period,omitempty"`         // CPU period to be used for hardcapping (in usecs). 0 to use system default.
    CpusetCpus        string            `json:"cpuset_cpus,omitempty"`        // CPU to use
    Freezer           FreezerState      `json:"freezer,omitempty"`            // set the freeze value for the process
    Slice             string            `json:"slice,omitempty"`              // Parent slice to use for systemd
}

type ActiveCgroup interface {
    Cleanup() error
}
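The `Cgroup` struct above is the configuration consumed by both the fs and systemd backends; `sample_cgroup.json` further down is its JSON form. A minimal sketch that builds the same limits in Go, with values that simply mirror that sample and are illustrative only:

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/libcontainer/cgroups"
)

func main() {
    cg := &cgroups.Cgroup{
        Name:            "luke",
        Parent:          "darth",
        AllowAllDevices: true,
        Memory:          1073741824, // 1 GiB, in bytes
        MemorySwap:      -1,         // -1 disables the swap limit
        CpuShares:       2048,
        CpuQuota:        500000, // with the period below, a 2-CPU hard cap
        CpuPeriod:       250000,
    }

    out, _ := json.MarshalIndent(cg, "", "  ")
    fmt.Println(string(out))
}
```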
27 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go generated vendored Normal file
@@ -0,0 +1,27 @@
package cgroups

import (
    "bytes"
    "testing"
)

const (
    cgroupsContents = `11:hugetlb:/
10:perf_event:/
9:blkio:/
8:net_cls:/
7:freezer:/
6:devices:/
5:memory:/
4:cpuacct,cpu:/
3:cpuset:/
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
)

func TestParseCgroups(t *testing.T) {
    r := bytes.NewBuffer([]byte(cgroupsContents))
    _, err := parseCgroupFile("blkio", r)
    if err != nil {
        t.Fatal(err)
    }
}
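The fixture above is in the `/proc/self/cgroup` format that the unexported `parseCgroupFile` consumes: each line is `hierarchy-id:controller-list:cgroup-path`. A stdlib-only sketch of splitting one such line, independent of the vendored parser and purely illustrative:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    line := "4:cpuacct,cpu:/"

    parts := strings.SplitN(line, ":", 3) // id, controllers, path
    controllers := strings.Split(parts[1], ",")

    fmt.Printf("hierarchy=%s controllers=%v path=%s\n", parts[0], controllers, parts[2])
    // hierarchy=4 controllers=[cpuacct cpu] path=/
}
```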
264 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go generated vendored Normal file
@@ -0,0 +1,264 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
"github.com/docker/libcontainer/cgroups/fs"
|
||||
"github.com/docker/libcontainer/cgroups/systemd"
|
||||
)
|
||||
|
||||
var createCommand = cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create a cgroup container using the supplied configuration and initial process.",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"config, c", "cgroup.json", "path to container configuration (cgroups.Cgroup object)"},
|
||||
cli.IntFlag{"pid, p", 0, "pid of the initial process in the container"},
|
||||
},
|
||||
Action: createAction,
|
||||
}
|
||||
|
||||
var destroyCommand = cli.Command{
|
||||
Name: "destroy",
|
||||
Usage: "Destroy an existing cgroup container.",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"name, n", "", "container name"},
|
||||
cli.StringFlag{"parent, p", "", "container parent"},
|
||||
},
|
||||
Action: destroyAction,
|
||||
}
|
||||
|
||||
var statsCommand = cli.Command{
|
||||
Name: "stats",
|
||||
Usage: "Get stats for cgroup",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"name, n", "", "container name"},
|
||||
cli.StringFlag{"parent, p", "", "container parent"},
|
||||
},
|
||||
Action: statsAction,
|
||||
}
|
||||
|
||||
var pauseCommand = cli.Command{
|
||||
Name: "pause",
|
||||
Usage: "Pause cgroup",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"name, n", "", "container name"},
|
||||
cli.StringFlag{"parent, p", "", "container parent"},
|
||||
},
|
||||
Action: pauseAction,
|
||||
}
|
||||
|
||||
var resumeCommand = cli.Command{
|
||||
Name: "resume",
|
||||
Usage: "Resume a paused cgroup",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"name, n", "", "container name"},
|
||||
cli.StringFlag{"parent, p", "", "container parent"},
|
||||
},
|
||||
Action: resumeAction,
|
||||
}
|
||||
|
||||
var psCommand = cli.Command{
|
||||
Name: "ps",
|
||||
Usage: "Get list of pids for a cgroup",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{"name, n", "", "container name"},
|
||||
cli.StringFlag{"parent, p", "", "container parent"},
|
||||
},
|
||||
Action: psAction,
|
||||
}
|
||||
|
||||
func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) {
|
||||
f, err := os.Open(c.String("config"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var config *cgroups.Cgroup
|
||||
if err := json.NewDecoder(f).Decode(&config); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func openLog(name string) error {
|
||||
f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.SetOutput(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getConfig(context *cli.Context) (*cgroups.Cgroup, error) {
|
||||
name := context.String("name")
|
||||
if name == "" {
|
||||
log.Fatal(fmt.Errorf("Missing container name"))
|
||||
}
|
||||
parent := context.String("parent")
|
||||
return &cgroups.Cgroup{
|
||||
Name: name,
|
||||
Parent: parent,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func killAll(config *cgroups.Cgroup) {
|
||||
// We could use freezer here to prevent process spawning while we are trying
|
||||
// to kill everything. But going with more portable solution of retrying for
|
||||
// now.
|
||||
pids := getPids(config)
|
||||
retry := 10
|
||||
for len(pids) != 0 || retry > 0 {
|
||||
killPids(pids)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
retry--
|
||||
pids = getPids(config)
|
||||
}
|
||||
if len(pids) != 0 {
|
||||
log.Fatal(fmt.Errorf("Could not kill existing processes in the container."))
|
||||
}
|
||||
}
|
||||
|
||||
func getPids(config *cgroups.Cgroup) []int {
|
||||
pids, err := fs.GetPids(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return pids
|
||||
}
|
||||
|
||||
func killPids(pids []int) {
|
||||
for _, pid := range pids {
|
||||
// pids might go away on their own. Ignore errors.
|
||||
syscall.Kill(pid, syscall.SIGKILL)
|
||||
}
|
||||
}
|
||||
|
||||
func setFreezerState(context *cli.Context, state cgroups.FreezerState) {
|
||||
config, err := getConfig(context)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if systemd.UseSystemd() {
|
||||
err = systemd.Freeze(config, state)
|
||||
} else {
|
||||
err = fs.Freeze(config, state)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createAction(context *cli.Context) {
|
||||
config, err := getConfigFromFile(context)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pid := context.Int("pid")
|
||||
if pid <= 0 {
|
||||
log.Fatal(fmt.Errorf("Invalid pid : %d", pid))
|
||||
}
|
||||
if systemd.UseSystemd() {
|
||||
_, err := systemd.Apply(config, pid)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
_, err := fs.Apply(config, pid)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func destroyAction(context *cli.Context) {
|
||||
config, err := getConfig(context)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
killAll(config)
|
||||
// Systemd will clean up cgroup state for empty container.
|
||||
if !systemd.UseSystemd() {
|
||||
err := fs.Cleanup(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func statsAction(context *cli.Context) {
|
||||
config, err := getConfig(context)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
stats, err := fs.GetStats(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(stats, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out))
|
||||
}
|
||||
|
||||
func pauseAction(context *cli.Context) {
|
||||
setFreezerState(context, cgroups.Frozen)
|
||||
}
|
||||
|
||||
func resumeAction(context *cli.Context) {
|
||||
setFreezerState(context, cgroups.Thawed)
|
||||
}
|
||||
|
||||
func psAction(context *cli.Context) {
|
||||
config, err := getConfig(context)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
pids, err := fs.GetPids(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Pids in '%s':\n", config.Name)
|
||||
fmt.Println(pids)
|
||||
}
|
||||
|
||||
func main() {
|
||||
logPath := os.Getenv("log")
|
||||
if logPath != "" {
|
||||
if err := openLog(logPath); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "cgutil"
|
||||
app.Usage = "Test utility for libcontainer cgroups package"
|
||||
app.Version = "0.1"
|
||||
|
||||
app.Commands = []cli.Command{
|
||||
createCommand,
|
||||
destroyCommand,
|
||||
statsCommand,
|
||||
pauseCommand,
|
||||
resumeCommand,
|
||||
psCommand,
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
10 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json generated vendored Normal file
@@ -0,0 +1,10 @@
{
    "name": "luke",
    "parent": "darth",
    "allow_all_devices": true,
    "memory": 1073741824,
    "memory_swap": -1,
    "cpu_shares": 2048,
    "cpu_quota": 500000,
    "cpu_period": 250000
}
198 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go generated vendored Normal file
@@ -0,0 +1,198 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
var (
|
||||
subsystems = map[string]subsystem{
|
||||
"devices": &DevicesGroup{},
|
||||
"memory": &MemoryGroup{},
|
||||
"cpu": &CpuGroup{},
|
||||
"cpuset": &CpusetGroup{},
|
||||
"cpuacct": &CpuacctGroup{},
|
||||
"blkio": &BlkioGroup{},
|
||||
"perf_event": &PerfEventGroup{},
|
||||
"freezer": &FreezerGroup{},
|
||||
}
|
||||
)
|
||||
|
||||
type subsystem interface {
|
||||
Set(*data) error
|
||||
Remove(*data) error
|
||||
GetStats(string, *cgroups.Stats) error
|
||||
}
|
||||
|
||||
type data struct {
|
||||
root string
|
||||
cgroup string
|
||||
c *cgroups.Cgroup
|
||||
pid int
|
||||
}
|
||||
|
||||
func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
|
||||
d, err := getCgroupData(c, pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sys := range subsystems {
|
||||
if err := sys.Set(d); err != nil {
|
||||
d.Cleanup()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func Cleanup(c *cgroups.Cgroup) error {
|
||||
d, err := getCgroupData(c, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not get Cgroup data %s", err)
|
||||
}
|
||||
return d.Cleanup()
|
||||
}
|
||||
|
||||
func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
|
||||
stats := cgroups.NewStats()
|
||||
|
||||
d, err := getCgroupData(c, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting CgroupData %s", err)
|
||||
}
|
||||
|
||||
for sysname, sys := range subsystems {
|
||||
path, err := d.path(sysname)
|
||||
if err != nil {
|
||||
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
|
||||
if err == cgroups.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := sys.GetStats(path, stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// Freeze toggles the container's freezer cgroup depending on the state
|
||||
// provided
|
||||
func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
|
||||
d, err := getCgroupData(c, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Freezer = state
|
||||
|
||||
freezer := subsystems["freezer"]
|
||||
|
||||
return freezer.Set(d)
|
||||
}
|
||||
|
||||
func GetPids(c *cgroups.Cgroup) ([]int, error) {
|
||||
d, err := getCgroupData(c, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir, err := d.path("devices")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cgroups.ReadProcsFile(dir)
|
||||
}
|
||||
|
||||
func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) {
|
||||
// we can pick any subsystem to find the root
|
||||
cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cgroupRoot = filepath.Dir(cgroupRoot)
|
||||
|
||||
if _, err := os.Stat(cgroupRoot); err != nil {
|
||||
return nil, fmt.Errorf("cgroups fs not found")
|
||||
}
|
||||
|
||||
cgroup := c.Name
|
||||
if c.Parent != "" {
|
||||
cgroup = filepath.Join(c.Parent, cgroup)
|
||||
}
|
||||
|
||||
return &data{
|
||||
root: cgroupRoot,
|
||||
cgroup: cgroup,
|
||||
c: c,
|
||||
pid: pid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (raw *data) parent(subsystem string) (string, error) {
|
||||
initPath, err := cgroups.GetInitCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(raw.root, subsystem, initPath), nil
|
||||
}
|
||||
|
||||
func (raw *data) path(subsystem string) (string, error) {
|
||||
parent, err := raw.parent(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(parent, raw.cgroup), nil
|
||||
}
|
||||
|
||||
func (raw *data) join(subsystem string) (string, error) {
|
||||
path, err := raw.path(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if err := writeFile(path, "cgroup.procs", strconv.Itoa(raw.pid)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (raw *data) Cleanup() error {
|
||||
for _, sys := range subsystems {
|
||||
sys.Remove(raw)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeFile(dir, file, data string) error {
|
||||
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
|
||||
}
|
||||
|
||||
func readFile(dir, file string) (string, error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(dir, file))
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func removePath(p string, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p != "" {
|
||||
return os.RemoveAll(p)
|
||||
}
|
||||
return nil
|
||||
}
|
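The `parent`/`path` helpers in apply_raw.go above compose the cgroup mount root, the init process's cgroup, and the configured parent/name into a single subsystem path. A stdlib sketch of that composition; the mountpoint and init cgroup values here are assumptions rather than probed as the vendored code does:

```go
package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    var (
        root     = "/sys/fs/cgroup"               // assumed parent of FindCgroupMountpoint("cpu")
        initPath = "/"                            // assumed cgroup of the init process
        cgroup   = filepath.Join("darth", "luke") // Cgroup.Parent joined with Cgroup.Name
    )

    fmt.Println(filepath.Join(root, "memory", initPath, cgroup))
    // /sys/fs/cgroup/memory/darth/luke
}
```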
141 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go generated vendored Normal file
@@ -0,0 +1,141 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
type BlkioGroup struct {
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) Set(d *data) error {
|
||||
// we just want to join this group even though we don't set anything
|
||||
if _, err := d.join("blkio"); err != nil && err != cgroups.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) Remove(d *data) error {
|
||||
return removePath(d.path("blkio"))
|
||||
}
|
||||
|
||||
/*
|
||||
examples:
|
||||
|
||||
blkio.sectors
|
||||
8:0 6792
|
||||
|
||||
blkio.io_service_bytes
|
||||
8:0 Read 1282048
|
||||
8:0 Write 2195456
|
||||
8:0 Sync 2195456
|
||||
8:0 Async 1282048
|
||||
8:0 Total 3477504
|
||||
Total 3477504
|
||||
|
||||
blkio.io_serviced
|
||||
8:0 Read 124
|
||||
8:0 Write 104
|
||||
8:0 Sync 104
|
||||
8:0 Async 124
|
||||
8:0 Total 228
|
||||
Total 228
|
||||
|
||||
blkio.io_queued
|
||||
8:0 Read 0
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 0
|
||||
8:0 Total 0
|
||||
Total 0
|
||||
*/
|
||||
|
||||
func splitBlkioStatLine(r rune) bool {
|
||||
return r == ' ' || r == ':'
|
||||
}
|
||||
|
||||
func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
|
||||
var blkioStats []cgroups.BlkioStatEntry
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return blkioStats, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
sc := bufio.NewScanner(f)
|
||||
for sc.Scan() {
|
||||
// format: dev type amount
|
||||
fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
|
||||
if len(fields) < 3 {
|
||||
if len(fields) == 2 && fields[0] == "Total" {
|
||||
// skip total line
|
||||
continue
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
|
||||
}
|
||||
}
|
||||
|
||||
v, err := strconv.ParseUint(fields[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
major := v
|
||||
|
||||
v, err = strconv.ParseUint(fields[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
minor := v
|
||||
|
||||
op := ""
|
||||
valueField := 2
|
||||
if len(fields) == 4 {
|
||||
op = fields[2]
|
||||
valueField = 3
|
||||
}
|
||||
v, err = strconv.ParseUint(fields[valueField], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
|
||||
}
|
||||
|
||||
return blkioStats, nil
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
var blkioStats []cgroups.BlkioStatEntry
|
||||
var err error
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.SectorsRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServicedRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoQueuedRecursive = blkioStats
|
||||
|
||||
return nil
|
||||
}
|
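For reference, the blkio parser above turns a line such as `8:0 Write 2195456` into a `BlkioStatEntry` with Major=8, Minor=0, Op="Write" and Value=2195456. A stdlib-only sketch of the same field splitting, illustrative rather than the vendored implementation:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    line := "8:0 Write 2195456"

    // Split on spaces and on the major:minor colon, as splitBlkioStatLine does.
    fields := strings.FieldsFunc(line, func(r rune) bool { return r == ' ' || r == ':' })

    fmt.Printf("major=%s minor=%s op=%s value=%s\n", fields[0], fields[1], fields[2], fields[3])
    // major=8 minor=0 op=Write value=2195456
}
```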
174 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go generated vendored Normal file
@@ -0,0 +1,174 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
sectorsRecursiveContents = `8:0 1024`
|
||||
serviceBytesRecursiveContents = `8:0 Read 100
|
||||
8:0 Write 200
|
||||
8:0 Sync 300
|
||||
8:0 Async 500
|
||||
8:0 Total 500
|
||||
Total 500`
|
||||
servicedRecursiveContents = `8:0 Read 10
|
||||
8:0 Write 40
|
||||
8:0 Sync 20
|
||||
8:0 Async 30
|
||||
8:0 Total 50
|
||||
Total 50`
|
||||
queuedRecursiveContents = `8:0 Read 1
|
||||
8:0 Write 4
|
||||
8:0 Sync 2
|
||||
8:0 Async 3
|
||||
8:0 Total 5
|
||||
Total 5`
|
||||
)
|
||||
|
||||
var actualStats = *cgroups.NewStats()
|
||||
|
||||
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
|
||||
*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
|
||||
}
|
||||
|
||||
func TestBlkioStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify expected stats.
|
||||
expectedStats := cgroups.BlkioStats{}
|
||||
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
|
||||
|
||||
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoSectorsFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServicedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoQueuedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected to fail, but did not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": "8:0 Read Write",
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected to fail, but did not")
|
||||
}
|
||||
}
|
72 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go generated vendored Normal file
@@ -0,0 +1,72 @@
package fs

import (
    "bufio"
    "os"
    "path/filepath"
    "strconv"

    "github.com/docker/libcontainer/cgroups"
)

type CpuGroup struct {
}

func (s *CpuGroup) Set(d *data) error {
    // We always want to join the cpu group, to allow fair cpu scheduling
    // on a container basis
    dir, err := d.join("cpu")
    if err != nil {
        return err
    }
    if d.c.CpuShares != 0 {
        if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil {
            return err
        }
    }
    if d.c.CpuPeriod != 0 {
        if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil {
            return err
        }
    }
    if d.c.CpuQuota != 0 {
        if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil {
            return err
        }
    }
    return nil
}

func (s *CpuGroup) Remove(d *data) error {
    return removePath(d.path("cpu"))
}

func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
    f, err := os.Open(filepath.Join(path, "cpu.stat"))
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }
    defer f.Close()

    sc := bufio.NewScanner(f)
    for sc.Scan() {
        t, v, err := getCgroupParamKeyValue(sc.Text())
        if err != nil {
            return err
        }
        switch t {
        case "nr_periods":
            stats.CpuStats.ThrottlingData.Periods = v

        case "nr_throttled":
            stats.CpuStats.ThrottlingData.ThrottledPeriods = v

        case "throttled_time":
            stats.CpuStats.ThrottlingData.ThrottledTime = v
        }
    }
    return nil
}
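As a rule of thumb, the hard cap written above works out to `cpu_quota / cpu_period` CPUs: the sample config's quota of 500000µs per 250000µs period allows two full CPUs, while 50000/100000 would pin a container to roughly half of one. A tiny sketch of that arithmetic with illustrative values:

```go
package main

import "fmt"

func main() {
    const (
        quotaUs  = 50000.0  // cpu.cfs_quota_us
        periodUs = 100000.0 // cpu.cfs_period_us
    )

    fmt.Printf("allowed CPUs: %.2f\n", quotaUs/periodUs) // allowed CPUs: 0.50
}
```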
66 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go generated vendored Normal file
@@ -0,0 +1,66 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
func TestCpuStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
kNrPeriods = 2000
|
||||
kNrThrottled = 200
|
||||
kThrottledTime = uint64(18446744073709551615)
|
||||
)
|
||||
|
||||
cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
|
||||
kNrPeriods, kNrThrottled, kThrottledTime)
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedStats := cgroups.ThrottlingData{
|
||||
Periods: kNrPeriods,
|
||||
ThrottledPeriods: kNrThrottled,
|
||||
ThrottledTime: kThrottledTime}
|
||||
|
||||
expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
|
||||
}
|
||||
|
||||
func TestNoCpuStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal("Expected not to fail, but did")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidCpuStat(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
cpuStatContent := `nr_periods 2000
|
||||
nr_throttled 200
|
||||
throttled_time fortytwo`
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failed stat parsing.")
|
||||
}
|
||||
}
|
163 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go generated vendored Normal file
@@ -0,0 +1,163 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
"github.com/docker/libcontainer/system"
|
||||
)
|
||||
|
||||
var (
|
||||
cpuCount = uint64(runtime.NumCPU())
|
||||
clockTicks = uint64(system.GetClockTicks())
|
||||
)
|
||||
|
||||
const nanosecondsInSecond = 1000000000
|
||||
|
||||
type CpuacctGroup struct {
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) Set(d *data) error {
|
||||
// we just want to join this group even though we don't set anything
|
||||
if _, err := d.join("cpuacct"); err != nil && err != cgroups.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) Remove(d *data) error {
|
||||
return removePath(d.path("cpuacct"))
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
var (
|
||||
err error
|
||||
startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage, kernelModeUsage, userModeUsage, percentage uint64
|
||||
)
|
||||
if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil {
|
||||
return err
|
||||
}
|
||||
startCpu = kernelModeUsage + userModeUsage
|
||||
if startSystem, err = getSystemCpuUsage(); err != nil {
|
||||
return err
|
||||
}
|
||||
startUsageTime := time.Now()
|
||||
if startUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil {
|
||||
return err
|
||||
}
|
||||
// sample for 100ms
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil {
|
||||
return err
|
||||
}
|
||||
lastCpu = kernelModeUsage + userModeUsage
|
||||
if lastSystem, err = getSystemCpuUsage(); err != nil {
|
||||
return err
|
||||
}
|
||||
usageSampleDuration := time.Since(startUsageTime)
|
||||
if lastUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
deltaProc = lastCpu - startCpu
|
||||
deltaSystem = lastSystem - startSystem
|
||||
deltaUsage = lastUsage - startUsage
|
||||
)
|
||||
if deltaSystem > 0.0 {
|
||||
percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount
|
||||
}
|
||||
// NOTE: a percentage over 100% is valid for POSIX because that means the
|
||||
// processes is using multiple cores
|
||||
stats.CpuStats.CpuUsage.PercentUsage = percentage
|
||||
// Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time.
|
||||
stats.CpuStats.CpuUsage.CurrentUsage = deltaUsage / uint64(usageSampleDuration.Nanoseconds())
|
||||
percpuUsage, err := getPercpuUsage(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.CpuStats.CpuUsage.TotalUsage = lastUsage
|
||||
stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
|
||||
stats.CpuStats.CpuUsage.UsageInKernelmode = (kernelModeUsage * nanosecondsInSecond) / clockTicks
|
||||
stats.CpuStats.CpuUsage.UsageInUsermode = (userModeUsage * nanosecondsInSecond) / clockTicks
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO(vmarmol): Use cgroups stats.
|
||||
func getSystemCpuUsage() (uint64, error) {
|
||||
|
||||
f, err := os.Open("/proc/stat")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
sc := bufio.NewScanner(f)
|
||||
for sc.Scan() {
|
||||
parts := strings.Fields(sc.Text())
|
||||
switch parts[0] {
|
||||
case "cpu":
|
||||
if len(parts) < 8 {
|
||||
return 0, fmt.Errorf("invalid number of cpu fields")
|
||||
}
|
||||
|
||||
var total uint64
|
||||
for _, i := range parts[1:8] {
|
||||
v, err := strconv.ParseUint(i, 10, 64)
|
||||
if err != nil {
|
||||
return 0.0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
|
||||
}
|
||||
total += v
|
||||
}
|
||||
return total, nil
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("invalid stat format")
|
||||
}
|
||||
|
||||
func getCpuUsage(path string) (uint64, uint64, error) {
|
||||
kernelModeUsage := uint64(0)
|
||||
userModeUsage := uint64(0)
|
||||
data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.stat"))
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
fields := strings.Fields(string(data))
|
||||
if len(fields) != 4 {
|
||||
return 0, 0, fmt.Errorf("Failure - %s is expected to have 4 fields", filepath.Join(path, "cpuacct.stat"))
|
||||
}
|
||||
if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return kernelModeUsage, userModeUsage, nil
|
||||
}
|
||||
|
||||
func getPercpuUsage(path string) ([]uint64, error) {
|
||||
percpuUsage := []uint64{}
|
||||
data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
|
||||
if err != nil {
|
||||
return percpuUsage, err
|
||||
}
|
||||
for _, value := range strings.Fields(string(data)) {
|
||||
value, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
|
||||
}
|
||||
percpuUsage = append(percpuUsage, value)
|
||||
}
|
||||
return percpuUsage, nil
|
||||
}
|
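The kernel/user figures above come from `cpuacct.stat`, which reports time in scheduler clock ticks; the code converts them to nanoseconds with `ticks * 1e9 / clockTicks`. A worked sketch of that conversion; the 100 Hz tick rate is an assumption, whereas the vendored code queries it via `system.GetClockTicks()`:

```go
package main

import "fmt"

func main() {
    const (
        clockTicks          = 100 // assumed USER_HZ
        nanosecondsInSecond = 1000000000
    )

    kernelTicks := uint64(250) // illustrative "system" field read from cpuacct.stat
    kernelNs := kernelTicks * nanosecondsInSecond / clockTicks

    fmt.Printf("%d ns (%.1f s) of kernel-mode CPU time\n", kernelNs, float64(kernelNs)/nanosecondsInSecond)
}
```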
110 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go generated vendored Normal file
@@ -0,0 +1,110 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
type CpusetGroup struct {
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) Set(d *data) error {
|
||||
// we don't want to join this cgroup unless it is specified
|
||||
if d.c.CpusetCpus != "" {
|
||||
dir, err := d.path("cpuset")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.ensureParent(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// because we are not using d.join we need to place the pid into the procs file
|
||||
// unlike the other subsystems
|
||||
if err := writeFile(dir, "cgroup.procs", strconv.Itoa(d.pid)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) Remove(d *data) error {
|
||||
return removePath(d.path("cpuset"))
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
|
||||
if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
|
||||
return
|
||||
}
|
||||
if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
|
||||
return
|
||||
}
|
||||
return cpus, mems, nil
|
||||
}
|
||||
|
||||
// ensureParent ensures that the parent directory of current is created
|
||||
// with the proper cpus and mems files copied from it's parent if the values
|
||||
// are a file with a new line char
|
||||
func (s *CpusetGroup) ensureParent(current string) error {
|
||||
parent := filepath.Dir(current)
|
||||
|
||||
if _, err := os.Stat(parent); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.ensureParent(parent); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
return s.copyIfNeeded(current, parent)
|
||||
}
|
||||
|
||||
// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
|
||||
// directory to the current directory if the file's contents are 0
|
||||
func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
|
||||
var (
|
||||
err error
|
||||
currentCpus, currentMems []byte
|
||||
parentCpus, parentMems []byte
|
||||
)
|
||||
|
||||
if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
|
||||
return err
|
||||
}
|
||||
if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.isEmpty(currentCpus) {
|
||||
if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if s.isEmpty(currentMems) {
|
||||
if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) isEmpty(b []byte) bool {
|
||||
return len(bytes.Trim(b, "\n")) == 0
|
||||
}
|
34
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
package fs

import "github.com/docker/libcontainer/cgroups"

type DevicesGroup struct {
}

func (s *DevicesGroup) Set(d *data) error {
	dir, err := d.join("devices")
	if err != nil {
		return err
	}

	if !d.c.AllowAllDevices {
		if err := writeFile(dir, "devices.deny", "a"); err != nil {
			return err
		}

		for _, dev := range d.c.AllowedDevices {
			if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil {
				return err
			}
		}
	}
	return nil
}

func (s *DevicesGroup) Remove(d *data) error {
	return removePath(d.path("devices"))
}

func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
50
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
package fs

import (
	"strings"
	"time"

	"github.com/docker/libcontainer/cgroups"
)

type FreezerGroup struct {
}

func (s *FreezerGroup) Set(d *data) error {
	switch d.c.Freezer {
	case cgroups.Frozen, cgroups.Thawed:
		dir, err := d.path("freezer")
		if err != nil {
			return err
		}

		if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil {
			return err
		}

		for {
			state, err := readFile(dir, "freezer.state")
			if err != nil {
				return err
			}
			if strings.TrimSpace(state) == string(d.c.Freezer) {
				break
			}
			time.Sleep(1 * time.Millisecond)
		}
	default:
		if _, err := d.join("freezer"); err != nil && err != cgroups.ErrNotFound {
			return err
		}
	}

	return nil
}

func (s *FreezerGroup) Remove(d *data) error {
	return removePath(d.path("freezer"))
}

func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
92
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
type MemoryGroup struct {
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) Set(d *data) error {
|
||||
dir, err := d.join("memory")
|
||||
// only return an error for memory if it was not specified
|
||||
if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}()
|
||||
|
||||
// Only set values if some config was specified.
|
||||
if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 {
|
||||
if d.c.Memory != 0 {
|
||||
if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if d.c.MemoryReservation != 0 {
|
||||
if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// By default, MemorySwap is set to twice the size of RAM.
|
||||
// If you want to omit MemorySwap, set it to `-1'.
|
||||
if d.c.MemorySwap != -1 {
|
||||
if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.Memory*2, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) Remove(d *data) error {
|
||||
return removePath(d.path("memory"))
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
// Set stats from memory.stat.
|
||||
statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer statsFile.Close()
|
||||
|
||||
sc := bufio.NewScanner(statsFile)
|
||||
for sc.Scan() {
|
||||
t, v, err := getCgroupParamKeyValue(sc.Text())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.Stats[t] = v
|
||||
}
|
||||
|
||||
// Set memory usage and max historical usage.
|
||||
value, err := getCgroupParamInt(path, "memory.usage_in_bytes")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.Usage = value
|
||||
value, err = getCgroupParamInt(path, "memory.max_usage_in_bytes")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.MaxUsage = value
|
||||
value, err = getCgroupParamInt(path, "memory.failcnt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.Failcnt = value
|
||||
|
||||
return nil
|
||||
}
|
127
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
memoryStatContents = `cache 512
|
||||
rss 1024`
|
||||
memoryUsageContents = "2048\n"
|
||||
memoryMaxUsageContents = "4096\n"
|
||||
memoryFailcnt = "100\n"
|
||||
)
|
||||
|
||||
func TestMemoryStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.failcnt": memoryFailcnt,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
|
||||
expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": "rss rss",
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": "bad",
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": "bad",
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
82
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
// NotifyOnOOM sends signals on the returned channel when the cgroup reaches
|
||||
// its memory limit. The channel is closed when the cgroup is removed.
|
||||
func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) {
|
||||
d, err := getCgroupData(c, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return notifyOnOOM(d)
|
||||
}
|
||||
|
||||
func notifyOnOOM(d *data) (<-chan struct{}, error) {
|
||||
dir, err := d.path("memory")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
|
||||
if syserr != 0 {
|
||||
return nil, syserr
|
||||
}
|
||||
|
||||
eventfd := os.NewFile(fd, "eventfd")
|
||||
|
||||
oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
|
||||
if err != nil {
|
||||
eventfd.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
eventControlPath = filepath.Join(dir, "cgroup.event_control")
|
||||
data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
|
||||
)
|
||||
|
||||
if err := writeFile(dir, "cgroup.event_control", data); err != nil {
|
||||
eventfd.Close()
|
||||
oomControl.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
close(ch)
|
||||
eventfd.Close()
|
||||
oomControl.Close()
|
||||
}()
|
||||
|
||||
buf := make([]byte, 8)
|
||||
|
||||
for {
|
||||
if _, err := eventfd.Read(buf); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// When a cgroup is destroyed, an event is sent to eventfd.
|
||||
// So if the control path is gone, return instead of notifying.
|
||||
if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- struct{}{}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, nil
|
||||
}
|
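For orientation, here is a minimal sketch of how a caller might consume the OOM notifications exposed by NotifyOnOOM above. The cgroup Parent and Name values are hypothetical placeholders, not part of this change, and the sketch assumes the process can read the container's memory cgroup.

package main

import (
	"log"

	"github.com/docker/libcontainer/cgroups"
	"github.com/docker/libcontainer/cgroups/fs"
)

func main() {
	// Hypothetical cgroup; Parent and Name must match an existing memory cgroup.
	c := &cgroups.Cgroup{Parent: "docker", Name: "abc123"}

	ooms, err := fs.NotifyOnOOM(c)
	if err != nil {
		log.Fatal(err)
	}

	// One receive per OOM event; the channel is closed when the cgroup is removed.
	for _ = range ooms {
		log.Println("container hit its memory limit")
	}
}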
86
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNotifyOnOOM(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.oom_control": "",
|
||||
"cgroup.event_control": "",
|
||||
})
|
||||
|
||||
var eventFd, oomControlFd int
|
||||
|
||||
ooms, err := notifyOnOOM(helper.CgroupData)
|
||||
if err != nil {
|
||||
t.Fatal("expected no error, got:", err)
|
||||
}
|
||||
|
||||
memoryPath, _ := helper.CgroupData.path("memory")
|
||||
data, err := readFile(memoryPath, "cgroup.event_control")
|
||||
if err != nil {
|
||||
t.Fatal("couldn't read event control file:", err)
|
||||
}
|
||||
|
||||
if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil {
|
||||
t.Fatalf("invalid control data %q: %s", data, err)
|
||||
}
|
||||
|
||||
// re-open the eventfd
|
||||
efd, err := syscall.Dup(eventFd)
|
||||
if err != nil {
|
||||
t.Fatal("unable to reopen eventfd:", err)
|
||||
}
|
||||
defer syscall.Close(efd)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("unable to dup event fd:", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, 1)
|
||||
|
||||
if _, err := syscall.Write(efd, buf); err != nil {
|
||||
t.Fatal("unable to write to eventfd:", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ooms:
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("no notification on oom channel after 100ms")
|
||||
}
|
||||
|
||||
// simulate what happens when a cgroup is destroyed by cleaning up and then
|
||||
// writing to the eventfd.
|
||||
helper.cleanup()
|
||||
if _, err := syscall.Write(efd, buf); err != nil {
|
||||
t.Fatal("unable to write to eventfd:", err)
|
||||
}
|
||||
|
||||
// give things a moment to shut down
|
||||
select {
|
||||
case _, ok := <-ooms:
|
||||
if ok {
|
||||
t.Fatal("expected no oom to be triggered")
|
||||
}
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
}
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF {
|
||||
t.Error("expected oom control to be closed")
|
||||
}
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF {
|
||||
t.Error("expected event fd to be closed")
|
||||
}
|
||||
}
|
24
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
package fs

import (
	"github.com/docker/libcontainer/cgroups"
)

type PerfEventGroup struct {
}

func (s *PerfEventGroup) Set(d *data) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("perf_event"); err != nil && err != cgroups.ErrNotFound {
		return err
	}
	return nil
}

func (s *PerfEventGroup) Remove(d *data) error {
	return removePath(d.path("perf_event"))
}

func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
73
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
|
||||
if len(expected) != len(actual) {
|
||||
return fmt.Errorf("blkioStatEntries length do not match")
|
||||
}
|
||||
for i, expValue := range expected {
|
||||
actValue := actual[i]
|
||||
if expValue != actValue {
|
||||
return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
|
||||
if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
|
||||
log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
|
||||
log.Printf("blkio IoServicedRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
|
||||
log.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
|
||||
log.Printf("blkio SectorsRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
|
||||
if expected != actual {
|
||||
log.Printf("Expected throttling data %v but found %v\n", expected, actual)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
|
||||
if expected.Usage != actual.Usage {
|
||||
log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
|
||||
t.Fail()
|
||||
}
|
||||
if expected.MaxUsage != actual.MaxUsage {
|
||||
log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
|
||||
t.Fail()
|
||||
}
|
||||
for key, expValue := range expected.Stats {
|
||||
actValue, ok := actual.Stats[key]
|
||||
if !ok {
|
||||
log.Printf("Expected memory stat key %s not found\n", key)
|
||||
t.Fail()
|
||||
}
|
||||
if expValue != actValue {
|
||||
log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
60
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/test_util.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
Utility for testing cgroup operations.
|
||||
|
||||
Creates a mock of the cgroup filesystem for the duration of the test.
|
||||
*/
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type cgroupTestUtil struct {
|
||||
// data to use in tests.
|
||||
CgroupData *data
|
||||
|
||||
// Path to the mock cgroup directory.
|
||||
CgroupPath string
|
||||
|
||||
// Temporary directory to store mock cgroup filesystem.
|
||||
tempDir string
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
// Creates a new test util for the specified subsystem
|
||||
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
|
||||
d := &data{}
|
||||
tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d.root = tempDir
|
||||
testCgroupPath, err := d.path(subsystem)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Ensure the full mock cgroup path exists.
|
||||
err = os.MkdirAll(testCgroupPath, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
|
||||
}
|
||||
|
||||
func (c *cgroupTestUtil) cleanup() {
|
||||
os.RemoveAll(c.tempDir)
|
||||
}
|
||||
|
||||
// Write the specified contents on the mock of the specified cgroup files.
|
||||
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
|
||||
for file, contents := range fileContents {
|
||||
err := writeFile(c.CgroupPath, file, contents)
|
||||
if err != nil {
|
||||
c.t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
40
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package fs

import (
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

var (
	ErrNotSupportStat = errors.New("stats are not supported for subsystem")
	ErrNotValidFormat = errors.New("line is not a valid key value format")
)

// Parses a cgroup param and returns as name, value
// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234
func getCgroupParamKeyValue(t string) (string, uint64, error) {
	parts := strings.Fields(t)
	switch len(parts) {
	case 2:
		value, err := strconv.ParseUint(parts[1], 10, 64)
		if err != nil {
			return "", 0, fmt.Errorf("Unable to convert param value to uint64: %s", err)
		}
		return parts[0], value, nil
	default:
		return "", 0, ErrNotValidFormat
	}
}

// Gets a single uint64 value from the specified cgroup file.
func getCgroupParamInt(cgroupPath, cgroupFile string) (uint64, error) {
	contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
}
68
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
cgroupFile = "cgroup.file"
|
||||
floatValue = 2048.0
|
||||
floatString = "2048"
|
||||
)
|
||||
|
||||
func TestGetCgroupParamsInt(t *testing.T) {
|
||||
// Setup tempdir.
|
||||
tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
tempFile := filepath.Join(tempDir, cgroupFile)
|
||||
|
||||
// Success.
|
||||
err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err := getCgroupParamInt(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != floatValue {
|
||||
t.Fatalf("Expected %d to equal %f", value, floatValue)
|
||||
}
|
||||
|
||||
// Success with new line.
|
||||
err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err = getCgroupParamInt(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != floatValue {
|
||||
t.Fatalf("Expected %d to equal %f", value, floatValue)
|
||||
}
|
||||
|
||||
// Not a float.
|
||||
err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getCgroupParamInt(tempDir, cgroupFile)
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error, got none")
|
||||
}
|
||||
|
||||
// Unknown file.
|
||||
err = os.Remove(tempFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getCgroupParamInt(tempDir, cgroupFile)
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error, got none")
|
||||
}
|
||||
}
|
67
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
package cgroups

type ThrottlingData struct {
	// Number of periods with throttling active
	Periods uint64 `json:"periods,omitempty"`
	// Number of periods when the container hit its throttling limit.
	ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
	// Aggregate time the container was throttled for in nanoseconds.
	ThrottledTime uint64 `json:"throttled_time,omitempty"`
}

type CpuUsage struct {
	// percentage of available CPUs currently being used.
	PercentUsage uint64 `json:"percent_usage,omitempty"`
	// nanoseconds of cpu time consumed over the last 100 ms.
	CurrentUsage uint64 `json:"current_usage,omitempty"`
	// total nanoseconds of cpu time consumed
	TotalUsage  uint64   `json:"total_usage,omitempty"`
	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
	// Time spent by tasks of the cgroup in kernel mode. Units: nanoseconds.
	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
	// Time spent by tasks of the cgroup in user mode. Units: nanoseconds.
	UsageInUsermode uint64 `json:"usage_in_usermode"`
}

type CpuStats struct {
	CpuUsage       CpuUsage       `json:"cpu_usage,omitempty"`
	ThrottlingData ThrottlingData `json:"throlling_data,omitempty"`
}

type MemoryStats struct {
	// current res_counter usage for memory
	Usage uint64 `json:"usage,omitempty"`
	// maximum usage ever recorded.
	MaxUsage uint64 `json:"max_usage,omitempty"`
	// TODO(vishh): Export these as stronger types.
	// all the stats exported via memory.stat.
	Stats map[string]uint64 `json:"stats,omitempty"`
	// number of times memory usage hits limits.
	Failcnt uint64 `json:"failcnt"`
}

type BlkioStatEntry struct {
	Major uint64 `json:"major,omitempty"`
	Minor uint64 `json:"minor,omitempty"`
	Op    string `json:"op,omitempty"`
	Value uint64 `json:"value,omitempty"`
}

type BlkioStats struct {
	// number of bytes transferred to and from the block device
	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"`
	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive,omitempty"`
}

type Stats struct {
	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
}

func NewStats() *Stats {
	memoryStats := MemoryStats{Stats: make(map[string]uint64)}
	return &Stats{MemoryStats: memoryStats}
}
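As a rough illustration of how these stat structures get filled in, the sketch below reads memory stats from an explicit cgroup path via the fs.MemoryGroup shown earlier; the directory path is a hypothetical example, not something this change prescribes.

package main

import (
	"fmt"
	"log"

	"github.com/docker/libcontainer/cgroups"
	"github.com/docker/libcontainer/cgroups/fs"
)

func main() {
	stats := cgroups.NewStats()

	// Hypothetical path to a container's memory cgroup directory.
	path := "/sys/fs/cgroup/memory/docker/abc123"

	// Populate stats.MemoryStats from memory.stat, usage_in_bytes, etc.
	if err := (&fs.MemoryGroup{}).GetStats(path, stats); err != nil {
		log.Fatal(err)
	}

	fmt.Println("usage:", stats.MemoryStats.Usage, "max:", stats.MemoryStats.MaxUsage)
}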
29
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
// +build !linux

package systemd

import (
	"fmt"

	"github.com/docker/libcontainer/cgroups"
)

func UseSystemd() bool {
	return false
}

func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
	return nil, fmt.Errorf("Systemd not supported")
}

func GetPids(c *cgroups.Cgroup) ([]int, error) {
	return nil, fmt.Errorf("Systemd not supported")
}

func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
	return fmt.Errorf("Systemd not supported")
}

func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
	return nil, fmt.Errorf("Systemd not supported")
}
439
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
generated
vendored
Normal file
@ -0,0 +1,439 @@
|
||||
// +build linux
|
||||
|
||||
package systemd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
systemd1 "github.com/coreos/go-systemd/dbus"
|
||||
"github.com/docker/docker/pkg/systemd"
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
"github.com/docker/libcontainer/cgroups/fs"
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
type systemdCgroup struct {
|
||||
cleanupDirs []string
|
||||
}
|
||||
|
||||
type subsystem interface {
|
||||
GetStats(string, *cgroups.Stats) error
|
||||
}
|
||||
|
||||
var (
|
||||
connLock sync.Mutex
|
||||
theConn *systemd1.Conn
|
||||
hasStartTransientUnit bool
|
||||
subsystems = map[string]subsystem{
|
||||
"devices": &fs.DevicesGroup{},
|
||||
"memory": &fs.MemoryGroup{},
|
||||
"cpu": &fs.CpuGroup{},
|
||||
"cpuset": &fs.CpusetGroup{},
|
||||
"cpuacct": &fs.CpuacctGroup{},
|
||||
"blkio": &fs.BlkioGroup{},
|
||||
"perf_event": &fs.PerfEventGroup{},
|
||||
"freezer": &fs.FreezerGroup{},
|
||||
}
|
||||
)
|
||||
|
||||
func UseSystemd() bool {
|
||||
if !systemd.SdBooted() {
|
||||
return false
|
||||
}
|
||||
|
||||
connLock.Lock()
|
||||
defer connLock.Unlock()
|
||||
|
||||
if theConn == nil {
|
||||
var err error
|
||||
theConn, err = systemd1.New()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Assume we have StartTransientUnit
|
||||
hasStartTransientUnit = true
|
||||
|
||||
// But if we get UnknownMethod error we don't
|
||||
if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil {
|
||||
if dbusError, ok := err.(dbus.Error); ok {
|
||||
if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
|
||||
hasStartTransientUnit = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return hasStartTransientUnit
|
||||
}
|
||||
|
||||
func getIfaceForUnit(unitName string) string {
|
||||
if strings.HasSuffix(unitName, ".scope") {
|
||||
return "Scope"
|
||||
}
|
||||
if strings.HasSuffix(unitName, ".service") {
|
||||
return "Service"
|
||||
}
|
||||
return "Unit"
|
||||
}
|
||||
|
||||
type cgroupArg struct {
|
||||
File string
|
||||
Value string
|
||||
}
|
||||
|
||||
func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
|
||||
var (
|
||||
unitName = getUnitName(c)
|
||||
slice = "system.slice"
|
||||
properties []systemd1.Property
|
||||
cpuArgs []cgroupArg
|
||||
cpusetArgs []cgroupArg
|
||||
memoryArgs []cgroupArg
|
||||
res systemdCgroup
|
||||
)
|
||||
|
||||
// First set up things not supported by systemd
|
||||
|
||||
// -1 disables memorySwap
|
||||
if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) {
|
||||
memorySwap := c.MemorySwap
|
||||
|
||||
if memorySwap == 0 {
|
||||
// By default, MemorySwap is set to twice the size of RAM.
|
||||
memorySwap = c.Memory * 2
|
||||
}
|
||||
|
||||
memoryArgs = append(memoryArgs, cgroupArg{"memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10)})
|
||||
}
|
||||
|
||||
if c.CpusetCpus != "" {
|
||||
cpusetArgs = append(cpusetArgs, cgroupArg{"cpuset.cpus", c.CpusetCpus})
|
||||
}
|
||||
|
||||
if c.Slice != "" {
|
||||
slice = c.Slice
|
||||
}
|
||||
|
||||
properties = append(properties,
|
||||
systemd1.Property{"Slice", dbus.MakeVariant(slice)},
|
||||
systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
|
||||
systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})},
|
||||
)
|
||||
|
||||
// Always enable accounting, this gets us the same behaviour as the fs implementation,
|
||||
// plus the kernel has some problems with joining the memory cgroup at a later time.
|
||||
properties = append(properties,
|
||||
systemd1.Property{"MemoryAccounting", dbus.MakeVariant(true)},
|
||||
systemd1.Property{"CPUAccounting", dbus.MakeVariant(true)},
|
||||
systemd1.Property{"BlockIOAccounting", dbus.MakeVariant(true)})
|
||||
|
||||
if c.Memory != 0 {
|
||||
properties = append(properties,
|
||||
systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
|
||||
}
|
||||
// TODO: MemoryReservation and MemorySwap not available in systemd
|
||||
|
||||
if c.CpuShares != 0 {
|
||||
properties = append(properties,
|
||||
systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
|
||||
}
|
||||
|
||||
if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To work around the lack of /dev/pts/* support above we need to manually add these
|
||||
// so, ask systemd for the cgroup used
|
||||
props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cgroup := props["ControlGroup"].(string)
|
||||
|
||||
if !c.AllowAllDevices {
|
||||
// Atm we can't use the systemd device support because of two missing things:
|
||||
// * Support for wildcards to allow mknod on any device
|
||||
// * Support for wildcards to allow /dev/pts support
|
||||
//
|
||||
// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
|
||||
// in wide use. When both these are availalable we will be able to switch, but need to keep the old
|
||||
// implementation for backwards compat.
|
||||
//
|
||||
// Note: we can't use systemd to set up the initial limits, and then change the cgroup
|
||||
// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
|
||||
// This happens at least for v208 when any sibling unit is started.
|
||||
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint("devices")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
initPath, err := cgroups.GetInitCgroupDir("devices")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir := filepath.Join(mountpoint, initPath, c.Parent, c.Name)
|
||||
|
||||
res.cleanupDirs = append(res.cleanupDirs, dir)
|
||||
|
||||
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(dir, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := writeFile(dir, "devices.deny", "a"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, dev := range c.AllowedDevices {
|
||||
if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(cpuArgs) != 0 {
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint("cpu")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
path := filepath.Join(mountpoint, cgroup)
|
||||
|
||||
for _, arg := range cpuArgs {
|
||||
if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(memoryArgs) != 0 {
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint("memory")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
path := filepath.Join(mountpoint, cgroup)
|
||||
|
||||
for _, arg := range memoryArgs {
|
||||
if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we need to manually join the freezer cgroup in systemd because it does not currently support it
|
||||
// via the dbus api
|
||||
freezerPath, err := joinFreezer(c, pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res.cleanupDirs = append(res.cleanupDirs, freezerPath)
|
||||
|
||||
if len(cpusetArgs) != 0 {
|
||||
// systemd does not atm set up the cpuset controller, so we must manually
|
||||
// join it. Additionally that is a very finicky controller where each
|
||||
// level must have a full setup as the default for a new directory is "no cpus",
|
||||
// so we avoid using any hierarchies here, creating a toplevel directory.
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint("cpuset")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
initPath, err := cgroups.GetInitCgroupDir("cpuset")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
foundCpus bool
|
||||
foundMems bool
|
||||
|
||||
rootPath = filepath.Join(mountpoint, initPath)
|
||||
path = filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name)
|
||||
)
|
||||
|
||||
res.cleanupDirs = append(res.cleanupDirs, path)
|
||||
|
||||
if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, arg := range cpusetArgs {
|
||||
if arg.File == "cpuset.cpus" {
|
||||
foundCpus = true
|
||||
}
|
||||
if arg.File == "cpuset.mems" {
|
||||
foundMems = true
|
||||
}
|
||||
if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// These are required, if not specified inherit from parent
|
||||
if !foundCpus {
|
||||
s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.cpus"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "cpuset.cpus"), s, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// These are required, if not specified inherit from parent
|
||||
if !foundMems {
|
||||
s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.mems"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "cpuset.mems"), s, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func writeFile(dir, file, data string) error {
|
||||
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
|
||||
}
|
||||
|
||||
func (c *systemdCgroup) Cleanup() error {
|
||||
// systemd cleans up, we don't need to do much
|
||||
|
||||
for _, path := range c.cleanupDirs {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func joinFreezer(c *cgroups.Cgroup, pid int) (string, error) {
|
||||
path, err := getSubsystemPath(c, "freezer")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) {
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
initPath, err := cgroups.GetInitCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
slice := "system.slice"
|
||||
if c.Slice != "" {
|
||||
slice = c.Slice
|
||||
}
|
||||
|
||||
return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
|
||||
}
|
||||
|
||||
func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
|
||||
path, err := getSubsystemPath(c, "freezer")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(state) == string(bytes.TrimSpace(state_)) {
|
||||
break
|
||||
}
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetPids(c *cgroups.Cgroup) ([]int, error) {
|
||||
unitName := getUnitName(c)
|
||||
|
||||
mountpoint, err := cgroups.FindCgroupMountpoint("cpu")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cgroup := props["ControlGroup"].(string)
|
||||
|
||||
return cgroups.ReadProcsFile(filepath.Join(mountpoint, cgroup))
|
||||
}
|
||||
|
||||
func getUnitName(c *cgroups.Cgroup) string {
|
||||
return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
|
||||
}
|
||||
|
||||
/*
|
||||
* This would be nicer to get from the systemd API when accounting
|
||||
* is enabled, but sadly there is no way to do that yet.
|
||||
* The lack of this functionality in the API & the approach taken
|
||||
* is guided by
|
||||
* http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation.
|
||||
*/
|
||||
func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
|
||||
stats := cgroups.NewStats()
|
||||
|
||||
for sysname, sys := range subsystems {
|
||||
subsystemPath, err := getSubsystemPath(c, sysname)
|
||||
if err != nil {
|
||||
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
|
||||
if err == cgroups.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := sys.GetStats(subsystemPath, stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
168
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
)
|
||||
|
||||
// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
|
||||
func FindCgroupMountpoint(subsystem string) (string, error) {
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, mount := range mounts {
|
||||
if mount.Fstype == "cgroup" {
|
||||
for _, opt := range strings.Split(mount.VfsOpts, ",") {
|
||||
if opt == subsystem {
|
||||
return mount.Mountpoint, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", ErrNotFound
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Mountpoint string
|
||||
Subsystems []string
|
||||
}
|
||||
|
||||
func (m Mount) GetThisCgroupDir() (string, error) {
|
||||
if len(m.Subsystems) == 0 {
|
||||
return "", fmt.Errorf("no subsystem for mount")
|
||||
}
|
||||
|
||||
return GetThisCgroupDir(m.Subsystems[0])
|
||||
}
|
||||
|
||||
func GetCgroupMounts() ([]Mount, error) {
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
all, err := GetAllSubsystems()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allMap := make(map[string]bool)
|
||||
for _, s := range all {
|
||||
allMap[s] = true
|
||||
}
|
||||
|
||||
res := []Mount{}
|
||||
for _, mount := range mounts {
|
||||
if mount.Fstype == "cgroup" {
|
||||
m := Mount{Mountpoint: mount.Mountpoint}
|
||||
|
||||
for _, opt := range strings.Split(mount.VfsOpts, ",") {
|
||||
if strings.HasPrefix(opt, "name=") {
|
||||
m.Subsystems = append(m.Subsystems, opt)
|
||||
}
|
||||
if allMap[opt] {
|
||||
m.Subsystems = append(m.Subsystems, opt)
|
||||
}
|
||||
}
|
||||
res = append(res, m)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Returns all the cgroup subsystems supported by the kernel
|
||||
func GetAllSubsystems() ([]string, error) {
|
||||
f, err := os.Open("/proc/cgroups")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
subsystems := []string{}
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
text := s.Text()
|
||||
if text[0] != '#' {
|
||||
parts := strings.Fields(text)
|
||||
if len(parts) >= 4 && parts[3] != "0" {
|
||||
subsystems = append(subsystems, parts[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
return subsystems, nil
|
||||
}
|
||||
|
||||
// Returns the relative path to the cgroup docker is running in.
|
||||
func GetThisCgroupDir(subsystem string) (string, error) {
|
||||
f, err := os.Open("/proc/self/cgroup")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseCgroupFile(subsystem, f)
|
||||
}
|
||||
|
||||
func GetInitCgroupDir(subsystem string) (string, error) {
|
||||
f, err := os.Open("/proc/1/cgroup")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseCgroupFile(subsystem, f)
|
||||
}
|
||||
|
||||
func ReadProcsFile(dir string) ([]int, error) {
|
||||
f, err := os.Open(filepath.Join(dir, "cgroup.procs"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var (
|
||||
s = bufio.NewScanner(f)
|
||||
out = []int{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
if t := s.Text(); t != "" {
|
||||
pid, err := strconv.Atoi(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, pid)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
text := s.Text()
|
||||
parts := strings.Split(text, ":")
|
||||
for _, subs := range strings.Split(parts[1], ",") {
|
||||
if subs == subsystem {
|
||||
return parts[2], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", ErrNotFound
|
||||
}
|
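A small sketch of how the helpers above might be combined to locate the memory cgroup of the current process; the subsystem name is the only input, and everything else comes from /proc and the mount table.

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/docker/libcontainer/cgroups"
)

func main() {
	// Where the "memory" subsystem is mounted, e.g. /sys/fs/cgroup/memory.
	mountpoint, err := cgroups.FindCgroupMountpoint("memory")
	if err != nil {
		log.Fatal(err)
	}

	// The relative cgroup path this process belongs to for that subsystem.
	dir, err := cgroups.GetThisCgroupDir("memory")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("memory cgroup dir:", filepath.Join(mountpoint, dir))
}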
86
Godeps/_workspace/src/github.com/docker/libcontainer/config.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
"github.com/docker/libcontainer/mount"
|
||||
"github.com/docker/libcontainer/network"
|
||||
)
|
||||
|
||||
type MountConfig mount.MountConfig
|
||||
|
||||
type Network network.Network
|
||||
|
||||
// Config defines configuration options for executing a process inside a contained environment.
|
||||
type Config struct {
|
||||
// Mount specific options.
|
||||
MountConfig *MountConfig `json:"mount_config,omitempty"`
|
||||
|
||||
// Hostname optionally sets the container's hostname if provided
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
|
||||
// User will set the uid and gid of the executing process running inside the container
|
||||
User string `json:"user,omitempty"`
|
||||
|
||||
// WorkingDir will change the processes current working directory inside the container's rootfs
|
||||
WorkingDir string `json:"working_dir,omitempty"`
|
||||
|
||||
// Env will populate the processes environment with the provided values
|
||||
// Any values from the parent processes will be cleared before the values
|
||||
// provided in Env are provided to the process
|
||||
Env []string `json:"environment,omitempty"`
|
||||
|
||||
// Tty when true will allocate a pty slave on the host for access by the container's process
|
||||
// and ensure that it is mounted inside the container's rootfs
|
||||
Tty bool `json:"tty,omitempty"`
|
||||
|
||||
// Namespaces specifies the container's namespaces that it should setup when cloning the init process
|
||||
// If a namespace is not provided that namespace is shared from the container's parent process
|
||||
Namespaces map[string]bool `json:"namespaces,omitempty"`
|
||||
|
||||
// Capabilities specify the capabilities to keep when executing the process inside the container
|
||||
// All capbilities not specified will be dropped from the processes capability mask
|
||||
Capabilities []string `json:"capabilities,omitempty"`
|
||||
|
||||
// Networks specifies the container's network setup to be created
|
||||
Networks []*Network `json:"networks,omitempty"`
|
||||
|
||||
// Routes can be specified to create entries in the route table as the container is started
|
||||
Routes []*Route `json:"routes,omitempty"`
|
||||
|
||||
// Cgroups specifies specific cgroup settings for the various subsystems that the container is
|
||||
// placed into to limit the resources the container has available
|
||||
Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`
|
||||
|
||||
// AppArmorProfile specifies the profile to apply to the process running in the container and is
|
||||
// change at the time the process is execed
|
||||
AppArmorProfile string `json:"apparmor_profile,omitempty"`
|
||||
|
||||
// ProcessLabel specifies the label to apply to the process running in the container. It is
|
||||
// commonly used by selinux
|
||||
ProcessLabel string `json:"process_label,omitempty"`
|
||||
|
||||
// RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
|
||||
// /proc/bus
|
||||
RestrictSys bool `json:"restrict_sys,omitempty"`
|
||||
}
|
||||
|
||||
// Routes can be specified to create entries in the route table as the container is started
|
||||
//
|
||||
// All of destination, source, and gateway should be either IPv4 or IPv6.
|
||||
// One of the three options must be present, and ommitted entries will use their
|
||||
// IP family default for the route table. For IPv4 for example, setting the
|
||||
// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
|
||||
// destination of 0.0.0.0(or *) when viewed in the route table.
|
||||
type Route struct {
|
||||
// Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
|
||||
Destination string `json:"destination,omitempty"`
|
||||
|
||||
// Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
|
||||
Source string `json:"source,omitempty"`
|
||||
|
||||
// Sets the gateway. Accepts IPv4 and IPv6
|
||||
Gateway string `json:"gateway,omitempty"`
|
||||
|
||||
// The device to set this route up for, for example: eth0
|
||||
InterfaceName string `json:"interface_name,omitempty"`
|
||||
}
|
160
Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer/devices"
|
||||
)
|
||||
|
||||
// Checks whether the expected capability is specified in the capabilities.
|
||||
func contains(expected string, values []string) bool {
|
||||
for _, v := range values {
|
||||
if v == expected {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containsDevice(expected *devices.Device, values []*devices.Device) bool {
|
||||
for _, d := range values {
|
||||
if d.Path == expected.Path &&
|
||||
d.CgroupPermissions == expected.CgroupPermissions &&
|
||||
d.FileMode == expected.FileMode &&
|
||||
d.MajorNumber == expected.MajorNumber &&
|
||||
d.MinorNumber == expected.MinorNumber &&
|
||||
d.Type == expected.Type {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func loadConfig(name string) (*Config, error) {
|
||||
f, err := os.Open(filepath.Join("sample_configs", name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var container *Config
|
||||
if err := json.NewDecoder(f).Decode(&container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
func TestConfigJsonFormat(t *testing.T) {
|
||||
container, err := loadConfig("attach_to_bridge.json")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if container.Hostname != "koye" {
|
||||
t.Log("hostname is not set")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !container.Tty {
|
||||
t.Log("tty should be set to true")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !container.Namespaces["NEWNET"] {
|
||||
t.Log("namespaces should contain NEWNET")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if container.Namespaces["NEWUSER"] {
|
||||
t.Log("namespaces should not contain NEWUSER")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if contains("SYS_ADMIN", container.Capabilities) {
|
||||
t.Log("SYS_ADMIN should not be enabled in capabilities mask")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !contains("MKNOD", container.Capabilities) {
|
||||
t.Log("MKNOD should be enabled in capabilities mask")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !contains("SYS_CHROOT", container.Capabilities) {
|
||||
t.Log("capabilities mask should contain SYS_CHROOT")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
for _, n := range container.Networks {
|
||||
if n.Type == "veth" {
|
||||
if n.Bridge != "docker0" {
|
||||
t.Logf("veth bridge should be docker0 but received %q", n.Bridge)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if n.Address != "172.17.0.101/16" {
|
||||
t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if n.VethPrefix != "veth" {
|
||||
t.Logf("veth prefix should be veth but received %q", n.VethPrefix)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if n.Gateway != "172.17.42.1" {
|
||||
t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if n.Mtu != 1500 {
|
||||
t.Logf("veth mtu should be 1500 but received %d", n.Mtu)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, d := range devices.DefaultSimpleDevices {
|
||||
if !containsDevice(d, container.MountConfig.DeviceNodes) {
|
||||
t.Logf("expected device configuration for %s", d.Path)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
if !container.RestrictSys {
|
||||
t.Log("expected restrict sys to be true")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestApparmorProfile(t *testing.T) {
|
||||
container, err := loadConfig("apparmor.json")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if container.AppArmorProfile != "docker-default" {
|
||||
t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelinuxLabels(t *testing.T) {
|
||||
container, err := loadConfig("selinux.json")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475"
|
||||
|
||||
if container.ProcessLabel != label {
|
||||
t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel)
|
||||
}
|
||||
if container.MountConfig.MountLabel != label {
|
||||
t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel)
|
||||
}
|
||||
}
|
128
Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
|
||||
// +build linux
|
||||
|
||||
package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/libcontainer/label"
|
||||
)
|
||||
|
||||
// Setup initializes the proper /dev/console inside the rootfs path
|
||||
func Setup(rootfs, consolePath, mountLabel string) error {
|
||||
oldMask := syscall.Umask(0000)
|
||||
defer syscall.Umask(oldMask)
|
||||
|
||||
if err := os.Chmod(consolePath, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chown(consolePath, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := label.SetFileLabel(consolePath, mountLabel); err != nil {
|
||||
return fmt.Errorf("set file label %s %s", consolePath, err)
|
||||
}
|
||||
|
||||
dest := filepath.Join(rootfs, "dev/console")
|
||||
|
||||
f, err := os.Create(dest)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("create %s %s", dest, err)
|
||||
}
|
||||
|
||||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
|
||||
if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil {
|
||||
return fmt.Errorf("bind %s to %s %s", consolePath, dest, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func OpenAndDup(consolePath string) error {
|
||||
slave, err := OpenTerminal(consolePath, syscall.O_RDWR)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open terminal %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Dup2(int(slave.Fd()), 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syscall.Dup2(int(slave.Fd()), 1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return syscall.Dup2(int(slave.Fd()), 2)
|
||||
}
|
||||
|
||||
// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// Unlockpt should be called before opening the slave side of a pseudoterminal.
|
||||
func Unlockpt(f *os.File) error {
|
||||
var u int
|
||||
|
||||
return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
|
||||
}
|
||||
|
||||
// Ptsname retrieves the name of the first available pts for the given master.
|
||||
func Ptsname(f *os.File) (string, error) {
|
||||
var n int
|
||||
|
||||
if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("/dev/pts/%d", n), nil
|
||||
}
|
||||
|
||||
// CreateMasterAndConsole will open /dev/ptmx on the host and retrieve the
|
||||
// pts name for use as the pty slave inside the container
|
||||
func CreateMasterAndConsole() (*os.File, string, error) {
|
||||
master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
console, err := Ptsname(master)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if err := Unlockpt(master); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return master, console, nil
|
||||
}
|
||||
|
||||
// OpenPtmx opens /dev/ptmx, i.e. the PTY master.
|
||||
func OpenPtmx() (*os.File, error) {
|
||||
// O_NOCTTY and O_CLOEXEC are not present in the os package, so we use the syscall constants for all of the flags.
|
||||
return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
|
||||
}
|
||||
|
||||
// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC
|
||||
// used to open the pty slave inside the container namespace
|
||||
func OpenTerminal(name string, flag int) (*os.File, error) {
|
||||
r, e := syscall.Open(name, flag, 0)
|
||||
if e != nil {
|
||||
return nil, &os.PathError{"open", name, e}
|
||||
}
|
||||
return os.NewFile(uintptr(r), name), nil
|
||||
}
|
||||
|
||||
func Ioctl(fd uintptr, flag, data uintptr) error {
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
71
Godeps/_workspace/src/github.com/docker/libcontainer/container.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
NOTE: The API is in flux and mainly not implemented. Proceed with caution until further notice.
|
||||
*/
|
||||
package libcontainer
|
||||
|
||||
// A libcontainer container object.
|
||||
//
|
||||
// Each container is thread-safe within the same process. Since a container can
|
||||
// be destroyed by a separate process, any function may return that the container
|
||||
// was not found.
|
||||
type Container interface {
|
||||
// Returns the path to the container which contains the state
|
||||
Path() string
|
||||
|
||||
// Returns the current run state of the container.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// system error.
|
||||
RunState() (*RunState, error)
|
||||
|
||||
// Returns the current config of the container.
|
||||
Config() *Config
|
||||
|
||||
// Start a process inside the container. Returns the PID of the new process (in the caller process's namespace) and a channel that will return the exit status of the process whenever it dies.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// config is invalid,
|
||||
// container is paused,
|
||||
// system error.
|
||||
Start(*ProcessConfig) (pid int, exitChan chan int, err error)
|
||||
|
||||
// Destroys the container after killing all running processes.
|
||||
//
|
||||
// Any event registrations are removed before the container is destroyed.
|
||||
// No error is returned if the container is already destroyed.
|
||||
//
|
||||
// Errors: system error.
|
||||
Destroy() error
|
||||
|
||||
// Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// system error.
|
||||
//
|
||||
// Some of the returned PIDs may no longer refer to processes in the Container, unless
|
||||
// the Container state is PAUSED in which case every PID in the slice is valid.
|
||||
Processes() ([]int, error)
|
||||
|
||||
// Returns statistics for the container.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// system error.
|
||||
Stats() (*ContainerStats, error)
|
||||
|
||||
// If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses
|
||||
// the execution of any user processes. Asynchronously, when the container has finished being paused the
|
||||
// state is changed to PAUSED.
|
||||
// If the Container state is PAUSED, do nothing.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// system error.
|
||||
Pause() error
|
||||
|
||||
// If the Container state is PAUSED, resumes the execution of any user processes in the
|
||||
// Container before setting the Container state to RUNNING.
|
||||
// If the Container state is RUNNING, do nothing.
|
||||
//
|
||||
// Errors: container no longer exists,
|
||||
// system error.
|
||||
Resume() error
|
||||
}
|
159
Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
package devices
|
||||
|
||||
var (
|
||||
// These are devices that are to be both allowed and created.
|
||||
|
||||
DefaultSimpleDevices = []*Device{
|
||||
// /dev/null and zero
|
||||
{
|
||||
Path: "/dev/null",
|
||||
Type: 'c',
|
||||
MajorNumber: 1,
|
||||
MinorNumber: 3,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
{
|
||||
Path: "/dev/zero",
|
||||
Type: 'c',
|
||||
MajorNumber: 1,
|
||||
MinorNumber: 5,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
|
||||
{
|
||||
Path: "/dev/full",
|
||||
Type: 'c',
|
||||
MajorNumber: 1,
|
||||
MinorNumber: 7,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
|
||||
// consoles and ttys
|
||||
{
|
||||
Path: "/dev/tty",
|
||||
Type: 'c',
|
||||
MajorNumber: 5,
|
||||
MinorNumber: 0,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
|
||||
// /dev/urandom,/dev/random
|
||||
{
|
||||
Path: "/dev/urandom",
|
||||
Type: 'c',
|
||||
MajorNumber: 1,
|
||||
MinorNumber: 9,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
{
|
||||
Path: "/dev/random",
|
||||
Type: 'c',
|
||||
MajorNumber: 1,
|
||||
MinorNumber: 8,
|
||||
CgroupPermissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
}
|
||||
|
||||
DefaultAllowedDevices = append([]*Device{
|
||||
// allow mknod for any device
|
||||
{
|
||||
Type: 'c',
|
||||
MajorNumber: Wildcard,
|
||||
MinorNumber: Wildcard,
|
||||
CgroupPermissions: "m",
|
||||
},
|
||||
{
|
||||
Type: 'b',
|
||||
MajorNumber: Wildcard,
|
||||
MinorNumber: Wildcard,
|
||||
CgroupPermissions: "m",
|
||||
},
|
||||
|
||||
{
|
||||
Path: "/dev/console",
|
||||
Type: 'c',
|
||||
MajorNumber: 5,
|
||||
MinorNumber: 1,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
{
|
||||
Path: "/dev/tty0",
|
||||
Type: 'c',
|
||||
MajorNumber: 4,
|
||||
MinorNumber: 0,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
{
|
||||
Path: "/dev/tty1",
|
||||
Type: 'c',
|
||||
MajorNumber: 4,
|
||||
MinorNumber: 1,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
// /dev/pts/ - pts namespaces are "coming soon"
|
||||
{
|
||||
Path: "",
|
||||
Type: 'c',
|
||||
MajorNumber: 136,
|
||||
MinorNumber: Wildcard,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
{
|
||||
Path: "",
|
||||
Type: 'c',
|
||||
MajorNumber: 5,
|
||||
MinorNumber: 2,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
|
||||
// tuntap
|
||||
{
|
||||
Path: "",
|
||||
Type: 'c',
|
||||
MajorNumber: 10,
|
||||
MinorNumber: 200,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
|
||||
/*// fuse
|
||||
{
|
||||
Path: "",
|
||||
Type: 'c',
|
||||
MajorNumber: 10,
|
||||
MinorNumber: 229,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
|
||||
// rtc
|
||||
{
|
||||
Path: "",
|
||||
Type: 'c',
|
||||
MajorNumber: 254,
|
||||
MinorNumber: 0,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
*/
|
||||
}, DefaultSimpleDevices...)
|
||||
|
||||
DefaultAutoCreatedDevices = append([]*Device{
|
||||
{
|
||||
// /dev/fuse is created but not allowed.
|
||||
// This is to allow java to work. Because java
|
||||
// insists on there being a /dev/fuse.
|
||||
// https://github.com/docker/docker/issues/514
|
||||
// https://github.com/docker/docker/issues/2393
|
||||
//
|
||||
Path: "/dev/fuse",
|
||||
Type: 'c',
|
||||
MajorNumber: 10,
|
||||
MinorNumber: 229,
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
}, DefaultSimpleDevices...)
|
||||
)
|
119
Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
package devices
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
Wildcard = -1
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotADeviceNode = errors.New("not a device node")
|
||||
)
|
||||
|
||||
type Device struct {
|
||||
Type rune `json:"type,omitempty"`
|
||||
Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards
|
||||
MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards.
|
||||
MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards.
|
||||
CgroupPermissions string `json:"cgroup_permissions,omitempty"` // Typically just "rwm"
|
||||
FileMode os.FileMode `json:"file_mode,omitempty"` // The permission bits of the file's mode
|
||||
}
|
||||
|
||||
func GetDeviceNumberString(deviceNumber int64) string {
|
||||
if deviceNumber == Wildcard {
|
||||
return "*"
|
||||
} else {
|
||||
return fmt.Sprintf("%d", deviceNumber)
|
||||
}
|
||||
}
|
||||
|
||||
func (device *Device) GetCgroupAllowString() string {
|
||||
return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions)
|
||||
}
|
||||
|
||||
// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a Linux device and return that information as a Device struct.
|
||||
func GetDevice(path, cgroupPermissions string) (*Device, error) {
|
||||
fileInfo, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
devType rune
|
||||
mode = fileInfo.Mode()
|
||||
fileModePermissionBits = os.FileMode.Perm(mode)
|
||||
)
|
||||
|
||||
switch {
|
||||
case mode&os.ModeDevice == 0:
|
||||
return nil, ErrNotADeviceNode
|
||||
case mode&os.ModeCharDevice != 0:
|
||||
fileModePermissionBits |= syscall.S_IFCHR
|
||||
devType = 'c'
|
||||
default:
|
||||
fileModePermissionBits |= syscall.S_IFBLK
|
||||
devType = 'b'
|
||||
}
|
||||
|
||||
stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot determine the device number for device %s", path)
|
||||
}
|
||||
devNumber := int(stat_t.Rdev)
|
||||
|
||||
return &Device{
|
||||
Type: devType,
|
||||
Path: path,
|
||||
MajorNumber: Major(devNumber),
|
||||
MinorNumber: Minor(devNumber),
|
||||
CgroupPermissions: cgroupPermissions,
|
||||
FileMode: fileModePermissionBits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func GetHostDeviceNodes() ([]*Device, error) {
|
||||
return getDeviceNodes("/dev")
|
||||
}
|
||||
|
||||
func getDeviceNodes(path string) ([]*Device, error) {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := []*Device{}
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
switch f.Name() {
|
||||
case "pts", "shm", "fd":
|
||||
continue
|
||||
default:
|
||||
sub, err := getDeviceNodes(filepath.Join(path, f.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out = append(out, sub...)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
device, err := GetDevice(filepath.Join(path, f.Name()), "rwm")
|
||||
if err != nil {
|
||||
if err == ErrNotADeviceNode {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, device)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
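For illustration only (not part of the vendored file), a minimal sketch of the cgroup allow strings produced by the Device struct and GetCgroupAllowString above, assuming the vendored import path; the /dev/null numbers (char 1:3) and the wildcard block rule are just examples.

package main

import (
	"fmt"

	"github.com/docker/libcontainer/devices"
)

func main() {
	// /dev/null is the character device 1:3.
	null := &devices.Device{Type: 'c', MajorNumber: 1, MinorNumber: 3, CgroupPermissions: "rwm"}
	fmt.Println(null.GetCgroupAllowString()) // prints: c 1:3 rwm

	// A wildcard rule that only permits mknod for any block device.
	anyBlock := &devices.Device{Type: 'b', MajorNumber: devices.Wildcard, MinorNumber: devices.Wildcard, CgroupPermissions: "m"}
	fmt.Println(anyBlock.GetCgroupAllowString()) // prints: b *:* m
}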
26
Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package devices
|
||||
|
||||
/*
|
||||
|
||||
This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
|
||||
|
||||
You can read what they are here:
|
||||
|
||||
- http://www.makelinux.net/ldd3/chp-3-sect-2
|
||||
- http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
|
||||
|
||||
Note! These are NOT the same as the MAJOR(dev_t device), MINOR(dev_t device), and MKDEV(int major, int minor) macros defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally by the kernel. - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
|
||||
|
||||
*/
|
||||
|
||||
func Major(devNumber int) int64 {
|
||||
return int64((devNumber >> 8) & 0xfff)
|
||||
}
|
||||
|
||||
func Minor(devNumber int) int64 {
|
||||
return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
|
||||
}
|
||||
|
||||
func Mkdev(majorNumber int64, minorNumber int64) int {
|
||||
return int((majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12))
|
||||
}
|
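For illustration only (not part of the vendored file), a minimal round-trip sketch of the Major/Minor/Mkdev helpers above, using /dev/null (char 1:3) as the example device:

package main

import (
	"fmt"

	"github.com/docker/libcontainer/devices"
)

func main() {
	dev := devices.Mkdev(1, 3)                          // 0x103 == 259 for /dev/null
	fmt.Println(devices.Major(dev), devices.Minor(dev)) // prints: 1 3
}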
25
Godeps/_workspace/src/github.com/docker/libcontainer/factory.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package libcontainer
|
||||
|
||||
type Factory interface {
|
||||
// Creates a new container in the given path. A unique ID is generated for the container and
|
||||
// the initial process is started inside the container.
|
||||
//
|
||||
// Returns the new container with a running process.
|
||||
//
|
||||
// Errors:
|
||||
// Path already exists
|
||||
// Config or initialConfig is invalid
|
||||
// System error
|
||||
//
|
||||
// On error, any partially created container parts are cleaned up (the operation is atomic).
|
||||
Create(path string, config *Config) (Container, error)
|
||||
|
||||
// Load takes the path for an existing container and reconstructs the container
|
||||
// from the state.
|
||||
//
|
||||
// Errors:
|
||||
// Path does not exist
|
||||
// Container is stopped
|
||||
// System error
|
||||
Load(path string) (Container, error)
|
||||
}
|
34
Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// +build !selinux !linux
|
||||
|
||||
package label
|
||||
|
||||
func GenLabels(options string) (string, string, error) {
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
func FormatMountLabel(src string, mountLabel string) string {
|
||||
return src
|
||||
}
|
||||
|
||||
func SetProcessLabel(processLabel string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetFileLabel(path string, fileLabel string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Relabel(path string, fileLabel string, relabel string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetPidCon(pid int) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func Init() {
|
||||
}
|
||||
|
||||
func ReserveLabel(label string) error {
|
||||
return nil
|
||||
}
|
100
Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// +build selinux,linux
|
||||
|
||||
package label
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/libcontainer/selinux"
|
||||
)
|
||||
|
||||
func GenLabels(options string) (string, string, error) {
|
||||
if !selinux.SelinuxEnabled() {
|
||||
return "", "", nil
|
||||
}
|
||||
var err error
|
||||
processLabel, mountLabel := selinux.GetLxcContexts()
|
||||
if processLabel != "" {
|
||||
var (
|
||||
s = strings.Fields(options)
|
||||
l = len(s)
|
||||
)
|
||||
if l > 0 {
|
||||
pcon := selinux.NewContext(processLabel)
|
||||
for i := 0; i < l; i++ {
|
||||
o := strings.Split(s[i], "=")
|
||||
pcon[o[0]] = o[1]
|
||||
}
|
||||
processLabel = pcon.Get()
|
||||
mountLabel, err = selinux.CopyLevel(processLabel, mountLabel)
|
||||
}
|
||||
}
|
||||
return processLabel, mountLabel, err
|
||||
}
|
||||
|
||||
func FormatMountLabel(src, mountLabel string) string {
|
||||
if mountLabel != "" {
|
||||
switch src {
|
||||
case "":
|
||||
src = fmt.Sprintf("context=%q", mountLabel)
|
||||
default:
|
||||
src = fmt.Sprintf("%s,context=%q", src, mountLabel)
|
||||
}
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
func SetProcessLabel(processLabel string) error {
|
||||
if selinux.SelinuxEnabled() {
|
||||
return selinux.Setexeccon(processLabel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetProcessLabel() (string, error) {
|
||||
if selinux.SelinuxEnabled() {
|
||||
return selinux.Getexeccon()
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func SetFileLabel(path string, fileLabel string) error {
|
||||
if selinux.SelinuxEnabled() && fileLabel != "" {
|
||||
return selinux.Setfilecon(path, fileLabel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Change the label of path to the filelabel string. If the relabel string
|
||||
// is "z", relabel will change the MCS label to s0. This will allow all
|
||||
// containers to share the content. If the relabel string is a "Z" then
|
||||
// the MCS label should continue to be used. SELinux will use this field
|
||||
// to make sure the content cannot be shared by other containers.
|
||||
func Relabel(path string, fileLabel string, relabel string) error {
|
||||
if fileLabel == "" {
|
||||
return nil
|
||||
}
|
||||
if relabel == "z" {
|
||||
c := selinux.NewContext(fileLabel)
|
||||
c["level"] = "s0"
|
||||
fileLabel = c.Get()
|
||||
}
|
||||
return selinux.Chcon(path, fileLabel, true)
|
||||
}
|
||||
|
||||
func GetPidCon(pid int) (string, error) {
|
||||
if !selinux.SelinuxEnabled() {
|
||||
return "", nil
|
||||
}
|
||||
return selinux.Getpidcon(pid)
|
||||
}
|
||||
|
||||
func Init() {
|
||||
selinux.SelinuxEnabled()
|
||||
}
|
||||
|
||||
func ReserveLabel(label string) error {
|
||||
selinux.ReserveLabel(label)
|
||||
return nil
|
||||
}
|
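For illustration only (not part of the vendored file), a minimal sketch of FormatMountLabel; the mount label shown is a made-up example, and the context= option is only appended when the package is built with the selinux,linux build tags (the stub version above returns the source options unchanged).

package main

import (
	"fmt"

	"github.com/docker/libcontainer/label"
)

func main() {
	opts := label.FormatMountLabel("mode=755", "system_u:object_r:svirt_sandbox_file_t:s0:c1,c2")
	// With the selinux,linux build tags this prints:
	//   mode=755,context="system_u:object_r:svirt_sandbox_file_t:s0:c1,c2"
	fmt.Println(opts)
}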
244
Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go
generated
vendored
Normal file
@ -0,0 +1,244 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/libcontainer/label"
|
||||
"github.com/docker/libcontainer/mount/nodes"
|
||||
)
|
||||
|
||||
// default mount point flags
|
||||
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
|
||||
|
||||
type mount struct {
|
||||
source string
|
||||
path string
|
||||
device string
|
||||
flags int
|
||||
data string
|
||||
}
|
||||
|
||||
// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a
|
||||
// new mount namespace.
|
||||
func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error {
|
||||
var (
|
||||
err error
|
||||
flag = syscall.MS_PRIVATE
|
||||
)
|
||||
if mountConfig.NoPivotRoot {
|
||||
flag = syscall.MS_SLAVE
|
||||
}
|
||||
if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
|
||||
return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err)
|
||||
}
|
||||
if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
|
||||
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
|
||||
}
|
||||
if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil {
|
||||
return fmt.Errorf("mount system %s", err)
|
||||
}
|
||||
if err := setupBindmounts(rootfs, mountConfig); err != nil {
|
||||
return fmt.Errorf("bind mounts %s", err)
|
||||
}
|
||||
if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil {
|
||||
return fmt.Errorf("create device nodes %s", err)
|
||||
}
|
||||
if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// stdin, stdout and stderr could be pointing to /dev/null from parent namespace.
|
||||
// Re-open them inside this namespace.
|
||||
if err := reOpenDevNull(rootfs); err != nil {
|
||||
return fmt.Errorf("Failed to reopen /dev/null %s", err)
|
||||
}
|
||||
|
||||
if err := setupDevSymlinks(rootfs); err != nil {
|
||||
return fmt.Errorf("dev symlinks %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Chdir(rootfs); err != nil {
|
||||
return fmt.Errorf("chdir into %s %s", rootfs, err)
|
||||
}
|
||||
|
||||
if mountConfig.NoPivotRoot {
|
||||
err = MsMoveRoot(rootfs)
|
||||
} else {
|
||||
err = PivotRoot(rootfs)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mountConfig.ReadonlyFs {
|
||||
if err := SetReadonly(); err != nil {
|
||||
return fmt.Errorf("set readonly %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
syscall.Umask(0022)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
|
||||
// inside the mount namespace
|
||||
func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error {
|
||||
for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly, mountConfig.Mounts) {
|
||||
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("mkdirall %s %s", m.path, err)
|
||||
}
|
||||
if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
|
||||
return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createIfNotExists(path string, isDir bool) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if isDir {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(path, os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupDevSymlinks(rootfs string) error {
|
||||
var links = [][2]string{
|
||||
{"/proc/self/fd", "/dev/fd"},
|
||||
{"/proc/self/fd/0", "/dev/stdin"},
|
||||
{"/proc/self/fd/1", "/dev/stdout"},
|
||||
{"/proc/self/fd/2", "/dev/stderr"},
|
||||
}
|
||||
|
||||
// kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
|
||||
// in /dev if it exists in /proc.
|
||||
if _, err := os.Stat("/proc/kcore"); err == nil {
|
||||
links = append(links, [2]string{"/proc/kcore", "/dev/kcore"})
|
||||
}
|
||||
|
||||
for _, link := range links {
|
||||
var (
|
||||
src = link[0]
|
||||
dst = filepath.Join(rootfs, link[1])
|
||||
)
|
||||
|
||||
if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("symlink %s %s %s", src, dst, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupBindmounts(rootfs string, mountConfig *MountConfig) error {
|
||||
bindMounts := mountConfig.Mounts
|
||||
for _, m := range bindMounts.OfType("bind") {
|
||||
var (
|
||||
flags = syscall.MS_BIND | syscall.MS_REC
|
||||
dest = filepath.Join(rootfs, m.Destination)
|
||||
)
|
||||
if !m.Writable {
|
||||
flags = flags | syscall.MS_RDONLY
|
||||
}
|
||||
|
||||
stat, err := os.Stat(m.Source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest, err = symlink.FollowSymlinkInScope(dest, rootfs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := createIfNotExists(dest, stat.IsDir()); err != nil {
|
||||
return fmt.Errorf("Creating new bind-mount target, %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
|
||||
return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
|
||||
}
|
||||
if !m.Writable {
|
||||
if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
|
||||
return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
|
||||
}
|
||||
}
|
||||
if m.Relabel != "" {
|
||||
if err := label.Relabel(m.Source, mountConfig.MountLabel, m.Relabel); err != nil {
|
||||
return fmt.Errorf("relabeling %s to %s %s", m.Source, mountConfig.MountLabel, err)
|
||||
}
|
||||
}
|
||||
if m.Private {
|
||||
if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
|
||||
return fmt.Errorf("mounting %s private %s", dest, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: this is crappy right now and should be cleaned up with a better way of handling system and
|
||||
// standard bind mounts allowing them to be more dynamic
|
||||
func newSystemMounts(rootfs, mountLabel string, sysReadonly bool, mounts Mounts) []mount {
|
||||
systemMounts := []mount{
|
||||
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
|
||||
{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)},
|
||||
{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
|
||||
{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
|
||||
}
|
||||
|
||||
sysMountFlags := defaultMountFlags
|
||||
if sysReadonly {
|
||||
sysMountFlags |= syscall.MS_RDONLY
|
||||
}
|
||||
systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags})
|
||||
|
||||
return systemMounts
|
||||
}
|
||||
|
||||
// If stdin, stdout or stderr were to be pointing to '/dev/null',
|
||||
// this method will make them point to '/dev/null' from within this namespace.
|
||||
func reOpenDevNull(rootfs string) error {
|
||||
var stat, devNullStat syscall.Stat_t
|
||||
file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to open /dev/null - %s", err)
|
||||
}
|
||||
defer file.Close()
|
||||
if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
|
||||
return fmt.Errorf("Failed to stat /dev/null - %s", err)
|
||||
}
|
||||
for fd := 0; fd < 3; fd++ {
|
||||
if err = syscall.Fstat(fd, &stat); err != nil {
|
||||
return fmt.Errorf("Failed to stat fd %d - %s", fd, err)
|
||||
}
|
||||
if stat.Rdev == devNullStat.Rdev {
|
||||
// Close and re-open the fd.
|
||||
if err = syscall.Dup2(int(file.Fd()), fd); err != nil {
|
||||
return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
20
Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func MsMoveRoot(rootfs string) error {
|
||||
if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
|
||||
return fmt.Errorf("mount move %s into / %s", rootfs, err)
|
||||
}
|
||||
|
||||
if err := syscall.Chroot("."); err != nil {
|
||||
return fmt.Errorf("chroot . %s", err)
|
||||
}
|
||||
|
||||
return syscall.Chdir("/")
|
||||
}
|
52
Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// +build linux
|
||||
|
||||
package nodes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/libcontainer/devices"
|
||||
)
|
||||
|
||||
// Create the device nodes in the container.
|
||||
func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
|
||||
oldMask := syscall.Umask(0000)
|
||||
defer syscall.Umask(oldMask)
|
||||
|
||||
for _, node := range nodesToCreate {
|
||||
if err := CreateDeviceNode(rootfs, node); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates the device node in the rootfs of the container.
|
||||
func CreateDeviceNode(rootfs string, node *devices.Device) error {
|
||||
var (
|
||||
dest = filepath.Join(rootfs, node.Path)
|
||||
parent = filepath.Dir(dest)
|
||||
)
|
||||
|
||||
if err := os.MkdirAll(parent, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileMode := node.FileMode
|
||||
switch node.Type {
|
||||
case 'c':
|
||||
fileMode |= syscall.S_IFCHR
|
||||
case 'b':
|
||||
fileMode |= syscall.S_IFBLK
|
||||
default:
|
||||
return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
|
||||
}
|
||||
|
||||
if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) {
|
||||
return fmt.Errorf("mknod %s %s", node.Path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
13
Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// +build !linux
|
||||
|
||||
package nodes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/docker/libcontainer/devices"
|
||||
)
|
||||
|
||||
func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
|
||||
return errors.New("Unsupported method")
|
||||
}
|
34
Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func PivotRoot(rootfs string) error {
|
||||
pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root")
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
|
||||
}
|
||||
|
||||
if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
|
||||
return fmt.Errorf("pivot_root %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Chdir("/"); err != nil {
|
||||
return fmt.Errorf("chdir / %s", err)
|
||||
}
|
||||
|
||||
// path to pivot dir now changed, update
|
||||
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
|
||||
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
|
||||
return fmt.Errorf("unmount pivot_root dir %s", err)
|
||||
}
|
||||
|
||||
return os.Remove(pivotDir)
|
||||
}
|
30
Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/libcontainer/console"
|
||||
)
|
||||
|
||||
func SetupPtmx(rootfs, consolePath, mountLabel string) error {
|
||||
ptmx := filepath.Join(rootfs, "dev/ptmx")
|
||||
if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Symlink("pts/ptmx", ptmx); err != nil {
|
||||
return fmt.Errorf("symlink dev ptmx %s", err)
|
||||
}
|
||||
|
||||
if consolePath != "" {
|
||||
if err := console.Setup(rootfs, consolePath, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
11
Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func SetReadonly() error {
|
||||
return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
|
||||
}
|
31
Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import "syscall"
|
||||
|
||||
func RemountProc() error {
|
||||
if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func RemountSys() error {
|
||||
if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil {
|
||||
if err != syscall.EINVAL {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
49
Godeps/_workspace/src/github.com/docker/libcontainer/mount/types.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
package mount
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/docker/libcontainer/devices"
|
||||
)
|
||||
|
||||
type MountConfig struct {
|
||||
// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
|
||||
// This is a common option when the container is running in ramdisk
|
||||
NoPivotRoot bool `json:"no_pivot_root,omitempty"`
|
||||
|
||||
// ReadonlyFs will remount the container's rootfs as readonly where only externally mounted
|
||||
// bind mounts are writable
|
||||
ReadonlyFs bool `json:"readonly_fs,omitempty"`
|
||||
|
||||
// Mounts specify additional source and destination paths that will be mounted inside the container's
|
||||
// rootfs and mount namespace if specified
|
||||
Mounts Mounts `json:"mounts,omitempty"`
|
||||
|
||||
// The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
|
||||
DeviceNodes []*devices.Device `json:"device_nodes,omitempty"`
|
||||
|
||||
MountLabel string `json:"mount_label,omitempty"`
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Source string `json:"source,omitempty"` // Source path, in the host namespace
|
||||
Destination string `json:"destination,omitempty"` // Destination path, in the container
|
||||
Writable bool `json:"writable,omitempty"`
|
||||
Relabel string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared
|
||||
Private bool `json:"private,omitempty"`
|
||||
}
|
||||
|
||||
type Mounts []Mount
|
||||
|
||||
var ErrUnsupported = errors.New("Unsupported method")
|
||||
|
||||
func (s Mounts) OfType(t string) Mounts {
|
||||
out := Mounts{}
|
||||
for _, m := range s {
|
||||
if m.Type == t {
|
||||
out = append(out, m)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
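For illustration only (not part of the vendored file), a minimal sketch of filtering a Mounts slice with OfType defined above; the paths are made up.

package main

import (
	"fmt"

	"github.com/docker/libcontainer/mount"
)

func main() {
	mounts := mount.Mounts{
		{Type: "bind", Source: "/var/data", Destination: "/data", Writable: true},
		{Type: "tmpfs", Destination: "/tmp"},
	}
	for _, m := range mounts.OfType("bind") {
		fmt.Println(m.Source, "->", m.Destination) // prints: /var/data -> /data
	}
}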
10
Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/docker/libcontainer"
|
||||
)
|
||||
|
||||
type CreateCommand func(container *libcontainer.Config, console, rootfs, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd
|
194
Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
|
||||
// +build linux
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/libcontainer"
|
||||
"github.com/docker/libcontainer/cgroups"
|
||||
"github.com/docker/libcontainer/cgroups/fs"
|
||||
"github.com/docker/libcontainer/cgroups/systemd"
|
||||
"github.com/docker/libcontainer/network"
|
||||
"github.com/docker/libcontainer/syncpipe"
|
||||
"github.com/docker/libcontainer/system"
|
||||
)
|
||||
|
||||
// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
|
||||
// Move this to libcontainer package.
|
||||
// Exec performs setup outside of a namespace so that a container can be
|
||||
// executed. Exec is a high level function for working with container namespaces.
|
||||
func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console string, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
// create a pipe so that we can synchronize with the namespaced process and
|
||||
// pass the veth name to the child
|
||||
syncPipe, err := syncpipe.NewSyncPipe()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer syncPipe.Close()
|
||||
|
||||
command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.Child(), args)
|
||||
// Note: these are only used in non-tty mode
|
||||
// if there is a tty for the container it will be opened within the namespace and the
|
||||
// fds will be duped to stdin, stdout, and stderr
|
||||
command.Stdin = stdin
|
||||
command.Stdout = stdout
|
||||
command.Stderr = stderr
|
||||
|
||||
if err := command.Start(); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Now that we have passed the pipe to the child, close our side
|
||||
syncPipe.CloseChild()
|
||||
|
||||
started, err := system.GetProcessStartTime(command.Process.Pid)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Do this before syncing with child so that no children
|
||||
// can escape the cgroup
|
||||
cleaner, err := SetupCgroups(container, command.Process.Pid)
|
||||
if err != nil {
|
||||
command.Process.Kill()
|
||||
command.Wait()
|
||||
return -1, err
|
||||
}
|
||||
if cleaner != nil {
|
||||
defer cleaner.Cleanup()
|
||||
}
|
||||
|
||||
var networkState network.NetworkState
|
||||
if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil {
|
||||
command.Process.Kill()
|
||||
command.Wait()
|
||||
return -1, err
|
||||
}
|
||||
|
||||
state := &libcontainer.State{
|
||||
InitPid: command.Process.Pid,
|
||||
InitStartTime: started,
|
||||
NetworkState: networkState,
|
||||
}
|
||||
|
||||
if err := libcontainer.SaveState(dataPath, state); err != nil {
|
||||
command.Process.Kill()
|
||||
command.Wait()
|
||||
return -1, err
|
||||
}
|
||||
defer libcontainer.DeleteState(dataPath)
|
||||
|
||||
// Sync with child
|
||||
if err := syncPipe.ReadFromChild(); err != nil {
|
||||
command.Process.Kill()
|
||||
command.Wait()
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if startCallback != nil {
|
||||
startCallback()
|
||||
}
|
||||
|
||||
if err := command.Wait(); err != nil {
|
||||
if _, ok := err.(*exec.ExitError); !ok {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
|
||||
}
|
||||
|
||||
// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces
|
||||
// defined on the container's configuration and use the current binary as the init with the
|
||||
// args provided
|
||||
//
|
||||
// console: the /dev/console to setup inside the container
|
||||
// init: the program executed inside the namespaces
|
||||
// root: the path to the container json file and information
|
||||
// pipe: sync pipe to synchronize the parent and child processes
|
||||
// args: the arguments to pass to the container to run as the user's program
|
||||
func DefaultCreateCommand(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
|
||||
// get our binary name from arg0 so we can always reexec ourself
|
||||
env := []string{
|
||||
"console=" + console,
|
||||
"pipe=3",
|
||||
"data_path=" + dataPath,
|
||||
}
|
||||
|
||||
/*
|
||||
TODO: move user and wd into env
|
||||
if user != "" {
|
||||
env = append(env, "user="+user)
|
||||
}
|
||||
if workingDir != "" {
|
||||
env = append(env, "wd="+workingDir)
|
||||
}
|
||||
*/
|
||||
|
||||
command := exec.Command(init, append([]string{"init"}, args...)...)
|
||||
// make sure the process is executed inside the context of the rootfs
|
||||
command.Dir = rootfs
|
||||
command.Env = append(os.Environ(), env...)
|
||||
|
||||
if command.SysProcAttr == nil {
|
||||
command.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces))
|
||||
|
||||
command.SysProcAttr.Pdeathsig = syscall.SIGKILL
|
||||
command.ExtraFiles = []*os.File{pipe}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
// SetupCgroups applies the cgroup restrictions to the process running in the container based
|
||||
// on the container's configuration
|
||||
func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
|
||||
if container.Cgroups != nil {
|
||||
c := container.Cgroups
|
||||
|
||||
if systemd.UseSystemd() {
|
||||
return systemd.Apply(c, nspid)
|
||||
}
|
||||
|
||||
return fs.Apply(c, nspid)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// InitializeNetworking creates the container's network stack outside of the namespace and moves
|
||||
// interfaces into the container's net namespaces if necessary
|
||||
func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error {
|
||||
for _, config := range container.Networks {
|
||||
strategy, err := network.GetStrategy(config.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return pipe.SendToChild(networkState)
|
||||
}
|
||||
|
||||
// GetNamespaceFlags parses the container's Namespaces options to set the correct
|
||||
// flags on clone, unshare, and setns
|
||||
func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
|
||||
for key, enabled := range namespaces {
|
||||
if enabled {
|
||||
if ns := GetNamespace(key); ns != nil {
|
||||
flag |= ns.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
return flag
|
||||
}
|
118
Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
// +build linux
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/docker/libcontainer"
|
||||
"github.com/docker/libcontainer/label"
|
||||
"github.com/docker/libcontainer/system"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Runs the command under 'args' inside an existing container referred to by 'container'.
|
||||
// Returns the exitcode of the command upon success and appropriate error on failure.
|
||||
func RunIn(container *libcontainer.Config, state *libcontainer.State, args []string, nsinitPath string, stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) {
|
||||
initArgs, err := getNsEnterCommand(strconv.Itoa(state.InitPid), container, console, args)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
cmd := exec.Command(nsinitPath, initArgs...)
|
||||
// Note: these are only used in non-tty mode
|
||||
// if there is a tty for the container it will be opened within the namespace and the
|
||||
// fds will be duped to stdin, stdout, and stderr
|
||||
cmd.Stdin = stdin
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if startCallback != nil {
|
||||
startCallback(cmd)
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
if _, ok := err.(*exec.ExitError); !ok {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
|
||||
}
|
||||
|
||||
// ExecIn uses an existing pid and joins the pid's namespaces with the new command.
|
||||
func ExecIn(container *libcontainer.Config, state *libcontainer.State, args []string) error {
|
||||
// Enter the namespace and then finish setup
|
||||
args, err := getNsEnterCommand(strconv.Itoa(state.InitPid), container, "", args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
finalArgs := append([]string{os.Args[0]}, args...)
|
||||
|
||||
if err := system.Execv(finalArgs[0], finalArgs[0:], os.Environ()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func getContainerJson(container *libcontainer.Config) (string, error) {
|
||||
// TODO(vmarmol): If this gets too long, send it over a pipe to the child.
|
||||
// Marshal the container into JSON since it won't be available in the namespace.
|
||||
containerJson, err := json.Marshal(container)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(containerJson), nil
|
||||
}
|
||||
|
||||
func getNsEnterCommand(initPid string, container *libcontainer.Config, console string, args []string) ([]string, error) {
|
||||
containerJson, err := getContainerJson(container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := []string{
|
||||
"nsenter",
|
||||
"--nspid", initPid,
|
||||
"--containerjson", containerJson,
|
||||
}
|
||||
|
||||
if console != "" {
|
||||
out = append(out, "--console", console)
|
||||
}
|
||||
out = append(out, "--")
|
||||
out = append(out, args...)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Run a command in a container after entering the namespace.
|
||||
func NsEnter(container *libcontainer.Config, args []string) error {
|
||||
// clear the current process's env and replace it with the environment
|
||||
// defined on the container
|
||||
if err := LoadContainerEnvironment(container); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := FinalizeNamespace(container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.ProcessLabel != "" {
|
||||
if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := system.Execv(args[0], args[0:], container.Env); err != nil {
|
||||
return err
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
257
Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
|
||||
// +build linux
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/user"
|
||||
"github.com/docker/libcontainer"
|
||||
"github.com/docker/libcontainer/apparmor"
|
||||
"github.com/docker/libcontainer/console"
|
||||
"github.com/docker/libcontainer/label"
|
||||
"github.com/docker/libcontainer/mount"
|
||||
"github.com/docker/libcontainer/netlink"
|
||||
"github.com/docker/libcontainer/network"
|
||||
"github.com/docker/libcontainer/security/capabilities"
|
||||
"github.com/docker/libcontainer/security/restrict"
|
||||
"github.com/docker/libcontainer/syncpipe"
|
||||
"github.com/docker/libcontainer/system"
|
||||
"github.com/docker/libcontainer/utils"
|
||||
)
|
||||
|
||||
// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
|
||||
// Move this to libcontainer package.
|
||||
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
|
||||
// and other options required for the new container.
|
||||
func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
syncPipe.ReportChildError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
rootfs, err := utils.ResolveRootfs(uncleanRootfs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// clear the current process's env and replace it with the environment
|
||||
// defined on the container
|
||||
if err := LoadContainerEnvironment(container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We always read this as it is a way to sync with the parent as well
|
||||
networkState, err := syncPipe.ReadFromParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if consolePath != "" {
|
||||
if err := console.OpenAndDup(consolePath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := syscall.Setsid(); err != nil {
|
||||
return fmt.Errorf("setsid %s", err)
|
||||
}
|
||||
if consolePath != "" {
|
||||
if err := system.Setctty(); err != nil {
|
||||
return fmt.Errorf("setctty %s", err)
|
||||
}
|
||||
}
|
||||
if err := setupNetwork(container, networkState); err != nil {
|
||||
return fmt.Errorf("setup networking %s", err)
|
||||
}
|
||||
if err := setupRoute(container); err != nil {
|
||||
return fmt.Errorf("setup route %s", err)
|
||||
}
|
||||
|
||||
label.Init()
|
||||
|
||||
if err := mount.InitializeMountNamespace(rootfs,
|
||||
consolePath,
|
||||
container.RestrictSys,
|
||||
(*mount.MountConfig)(container.MountConfig)); err != nil {
|
||||
return fmt.Errorf("setup mount namespace %s", err)
|
||||
}
|
||||
|
||||
if container.Hostname != "" {
|
||||
if err := syscall.Sethostname([]byte(container.Hostname)); err != nil {
|
||||
return fmt.Errorf("sethostname %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
runtime.LockOSThread()
|
||||
|
||||
if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
|
||||
return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
|
||||
}
|
||||
|
||||
if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
|
||||
return fmt.Errorf("set process label %s", err)
|
||||
}
|
||||
|
||||
// TODO: (crosbymichael) make this configurable at the Config level
|
||||
if container.RestrictSys {
|
||||
if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
pdeathSignal, err := system.GetParentDeathSignal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parent death signal %s", err)
|
||||
}
|
||||
|
||||
if err := FinalizeNamespace(container); err != nil {
|
||||
return fmt.Errorf("finalize namespace %s", err)
|
||||
}
|
||||
|
||||
// FinalizeNamespace can change user/group which clears the parent death
|
||||
// signal, so we restore it here.
|
||||
if err := RestoreParentDeathSignal(pdeathSignal); err != nil {
|
||||
return fmt.Errorf("restore parent death signal %s", err)
|
||||
}
|
||||
|
||||
return system.Execv(args[0], args[0:], container.Env)
|
||||
}
|
||||
|
||||
// RestoreParentDeathSignal sets the parent death signal to old.
|
||||
func RestoreParentDeathSignal(old int) error {
|
||||
if old == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
current, err := system.GetParentDeathSignal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parent death signal %s", err)
|
||||
}
|
||||
|
||||
if old == current {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := system.ParentDeathSignal(uintptr(old)); err != nil {
|
||||
return fmt.Errorf("set parent death signal %s", err)
|
||||
}
|
||||
|
||||
// Signal self if parent is already dead. Does nothing if running in a new
|
||||
// PID namespace, as Getppid will always return 0.
|
||||
if syscall.Getppid() == 1 {
|
||||
return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupUser changes the groups, gid, and uid for the user inside the container
|
||||
func SetupUser(u string) error {
|
||||
uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid())
|
||||
if err != nil {
|
||||
return fmt.Errorf("get supplementary groups %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Setgroups(suppGids); err != nil {
|
||||
return fmt.Errorf("setgroups %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Setgid(gid); err != nil {
|
||||
return fmt.Errorf("setgid %s", err)
|
||||
}
|
||||
|
||||
if err := syscall.Setuid(uid); err != nil {
|
||||
return fmt.Errorf("setuid %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupNetwork uses the Network config if it is not nil to initialize
|
||||
// the new veth interface inside the container for use by changing the name to eth0
|
||||
// setting the MTU and IP address along with the default gateway
|
||||
func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error {
|
	for _, config := range container.Networks {
		strategy, err := network.GetStrategy(config.Type)
		if err != nil {
			return err
		}

		err1 := strategy.Initialize((*network.Network)(config), networkState)
		if err1 != nil {
			return err1
		}
	}
	return nil
}

func setupRoute(container *libcontainer.Config) error {
	for _, config := range container.Routes {
		if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil {
			return err
		}
	}
	return nil
}

// FinalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaky file descriptors
// before execing the command inside the namespace
func FinalizeNamespace(container *libcontainer.Config) error {
	// Ensure that all non-standard fds we may have accidentally
	// inherited are marked close-on-exec so they stay out of the
	// container
	if err := utils.CloseExecFrom(3); err != nil {
		return fmt.Errorf("close open file descriptors %s", err)
	}

	// drop capabilities in bounding set before changing user
	if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
		return fmt.Errorf("drop bounding set %s", err)
	}

	// preserve existing capabilities while we change users
	if err := system.SetKeepCaps(); err != nil {
		return fmt.Errorf("set keep caps %s", err)
	}

	if err := SetupUser(container.User); err != nil {
		return fmt.Errorf("setup user %s", err)
	}

	if err := system.ClearKeepCaps(); err != nil {
		return fmt.Errorf("clear keep caps %s", err)
	}

	// drop all other capabilities
	if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
		return fmt.Errorf("drop capabilities %s", err)
	}

	if container.WorkingDir != "" {
		if err := syscall.Chdir(container.WorkingDir); err != nil {
			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
		}
	}

	return nil
}

func LoadContainerEnvironment(container *libcontainer.Config) error {
	os.Clearenv()
	for _, pair := range container.Env {
		p := strings.SplitN(pair, "=", 2)
		if len(p) < 2 {
			return fmt.Errorf("invalid environment '%v'", pair)
		}
		if err := os.Setenv(p[0], p[1]); err != nil {
			return err
		}
	}
	return nil
}
223 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter.go generated vendored Normal file
@ -0,0 +1,223 @@
// +build linux

package namespaces

/*
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/limits.h>
#include <linux/sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <getopt.h>

static const int kBufSize = 256;

void get_args(int *argc, char ***argv) {
    // Read argv
    int fd = open("/proc/self/cmdline", O_RDONLY);

    // Read the whole commandline.
    ssize_t contents_size = 0;
    ssize_t contents_offset = 0;
    char *contents = NULL;
    ssize_t bytes_read = 0;
    do {
        contents_size += kBufSize;
        contents = (char *) realloc(contents, contents_size);
        bytes_read = read(fd, contents + contents_offset, contents_size - contents_offset);
        contents_offset += bytes_read;
    } while (bytes_read > 0);
    close(fd);

    // Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
    ssize_t i;
    *argc = 0;
    for (i = 0; i < contents_offset; i++) {
        if (contents[i] == '\0') {
            (*argc)++;
        }
    }
    *argv = (char **) malloc(sizeof(char *) * ((*argc) + 1));
    int idx;
    for (idx = 0; idx < (*argc); idx++) {
        (*argv)[idx] = contents;
        contents += strlen(contents) + 1;
    }
    (*argv)[*argc] = NULL;
}

// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
#define _GNU_SOURCE
#include <sched.h>
#include "syscall.h"
#ifdef SYS_setns
int setns(int fd, int nstype) {
    return syscall(SYS_setns, fd, nstype);
}
#endif
#endif

void print_usage() {
    fprintf(stderr, "<binary> nsenter --nspid <pid> --containerjson <container_json> -- cmd1 arg1 arg2...\n");
}

void nsenter() {
    int argc;
    char **argv;
    get_args(&argc, &argv);

    // Ignore if this is not for us.
    if (argc < 2 || strcmp(argv[1], "nsenter") != 0) {
        return;
    }

    // USAGE: <binary> nsenter <PID> <process label> <container JSON> <argv>...
    if (argc < 6) {
        fprintf(stderr, "nsenter: Incorrect usage, not enough arguments\n");
        exit(1);
    }

    static const struct option longopts[] = {
        { "nspid", required_argument, NULL, 'n' },
        { "containerjson", required_argument, NULL, 'c' },
        { "console", required_argument, NULL, 't' },
        { NULL, 0, NULL, 0 }
    };

    int c;
    pid_t init_pid = -1;
    char *init_pid_str = NULL;
    char *container_json = NULL;
    char *console = NULL;
    while ((c = getopt_long_only(argc, argv, "n:s:c:", longopts, NULL)) != -1) {
        switch (c) {
        case 'n':
            init_pid_str = optarg;
            break;
        case 'c':
            container_json = optarg;
            break;
        case 't':
            console = optarg;
            break;
        }
    }

    if (container_json == NULL || init_pid_str == NULL) {
        print_usage();
        exit(1);
    }

    init_pid = strtol(init_pid_str, NULL, 10);
    if (errno != 0 || init_pid <= 0) {
        fprintf(stderr, "nsenter: Failed to parse PID from \"%s\" with error: \"%s\"\n", init_pid_str, strerror(errno));
        print_usage();
        exit(1);
    }

    argc -= 3;
    argv += 3;

    if (setsid() == -1) {
        fprintf(stderr, "setsid failed. Error: %s\n", strerror(errno));
        exit(1);
    }

    // before we setns we need to dup the console
    int consolefd = -1;
    if (console != NULL) {
        consolefd = open(console, O_RDWR);
        if (consolefd < 0) {
            fprintf(stderr, "nsenter: failed to open console %s %s\n", console, strerror(errno));
            exit(1);
        }
    }

    // Setns on all supported namespaces.
    char ns_dir[PATH_MAX];
    memset(ns_dir, 0, PATH_MAX);
    snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid);
    struct dirent *dent;
    DIR *dir = opendir(ns_dir);
    if (dir == NULL) {
        fprintf(stderr, "nsenter: Failed to open directory \"%s\" with error: \"%s\"\n", ns_dir, strerror(errno));
        exit(1);
    }

    while((dent = readdir(dir)) != NULL) {
        if(strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, "user") == 0) {
            continue;
        }

        // Get and open the namespace for the init we are joining..
        char buf[PATH_MAX];
        memset(buf, 0, PATH_MAX);
        snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, dent->d_name);
        int fd = open(buf, O_RDONLY);
        if (fd == -1) {
            fprintf(stderr, "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", buf, dent->d_name, strerror(errno));
            exit(1);
        }

        // Set the namespace.
        if (setns(fd, 0) == -1) {
            fprintf(stderr, "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", dent->d_name, strerror(errno));
            exit(1);
        }
        close(fd);
    }
    closedir(dir);

    // We must fork to actually enter the PID namespace.
    int child = fork();
    if (child == 0) {
        if (consolefd != -1) {
            if (dup2(consolefd, STDIN_FILENO) != 0) {
                fprintf(stderr, "nsenter: failed to dup 0 %s\n", strerror(errno));
                exit(1);
            }
            if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) {
                fprintf(stderr, "nsenter: failed to dup 1 %s\n", strerror(errno));
                exit(1);
            }
            if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) {
                fprintf(stderr, "nsenter: failed to dup 2 %s\n", strerror(errno));
                exit(1);
            }
        }

        // Finish executing, let the Go runtime take over.
        return;
    } else {
        // Parent, wait for the child.
        int status = 0;
        if (waitpid(child, &status, 0) == -1) {
            fprintf(stderr, "nsenter: Failed to waitpid with error: \"%s\"\n", strerror(errno));
            exit(1);
        }

        // Forward the child's exit code or re-send its death signal.
        if (WIFEXITED(status)) {
            exit(WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
            kill(getpid(), WTERMSIG(status));
        }
        exit(1);
    }

    return;
}

__attribute__((constructor)) void init(void) {
    nsenter();
}
*/
import "C"
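Because the block above is a cgo preamble, nsenter() runs from a C constructor before the Go runtime starts, so joining an existing container amounts to re-executing the current binary with "nsenter" as its first argument. The wrapper below is only a sketch of that call pattern, assuming /proc/self/exe and the flags shown in print_usage(); it is not part of this commit.

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// reenterNamespaces is a hypothetical helper: it re-executes the current
// binary so the cgo constructor in nsenter.go can join the namespaces of
// initPid before the Go runtime comes up in the child process.
func reenterNamespaces(initPid, containerJSON string, cmd []string) error {
    args := append([]string{"nsenter", "--nspid", initPid, "--containerjson", containerJSON, "--"}, cmd...)
    c := exec.Command("/proc/self/exe", args...)
    c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr
    return c.Run()
}

func main() {
    // Illustrative values; a real caller would pass the container's init PID
    // and its serialized config.
    if err := reenterNamespaces("1234", "{}", []string{"/bin/sh"}); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}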
50 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types.go generated vendored Normal file
@ -0,0 +1,50 @@
package namespaces

import "errors"

type (
    Namespace struct {
        Key   string `json:"key,omitempty"`
        Value int    `json:"value,omitempty"`
        File  string `json:"file,omitempty"`
    }
    Namespaces []*Namespace
)

// namespaceList is used to convert the libcontainer types
// into the names of the files located in /proc/<pid>/ns/* for
// each namespace
var (
    namespaceList = Namespaces{}

    ErrUnkownNamespace = errors.New("Unknown namespace")
    ErrUnsupported     = errors.New("Unsupported method")
)

func (ns *Namespace) String() string {
    return ns.Key
}

func GetNamespace(key string) *Namespace {
    for _, ns := range namespaceList {
        if ns.Key == key {
            cpy := *ns
            return &cpy
        }
    }
    return nil
}

// Contains returns true if the specified Namespace is
// in the slice
func (n Namespaces) Contains(ns string) bool {
    return n.Get(ns) != nil
}

func (n Namespaces) Get(ns string) *Namespace {
    for _, nsp := range n {
        if nsp != nil && nsp.Key == ns {
            return nsp
        }
    }
    return nil
}
16 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_linux.go generated vendored Normal file
@ -0,0 +1,16 @@
package namespaces

import (
    "syscall"
)

func init() {
    namespaceList = Namespaces{
        {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"},
        {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"},
        {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"},
        {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"},
        {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"},
        {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"},
    }
}
30 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_test.go generated vendored Normal file
@ -0,0 +1,30 @@
package namespaces

import (
    "testing"
)

func TestNamespacesContains(t *testing.T) {
    ns := Namespaces{
        GetNamespace("NEWPID"),
        GetNamespace("NEWNS"),
        GetNamespace("NEWUTS"),
    }

    if ns.Contains("NEWNET") {
        t.Fatal("namespaces should not contain NEWNET")
    }

    if !ns.Contains("NEWPID") {
        t.Fatal("namespaces should contain NEWPID but does not")
    }

    withNil := Namespaces{
        GetNamespace("UNDEFINED"), // this element will be nil
        GetNamespace("NEWPID"),
    }

    if !withNil.Contains("NEWPID") {
        t.Fatal("namespaces should contain NEWPID but does not")
    }
}
2 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS generated vendored Normal file
@ -0,0 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
Guillaume J. Charmes <guillaume@docker.com> (@creack)
23 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go generated vendored Normal file
@ -0,0 +1,23 @@
// Package netlink provides access to low level Netlink sockets and messages.
//
// Actual implementations are in:
// netlink_linux.go
// netlink_darwin.go
package netlink

import (
    "errors"
    "net"
)

var (
    ErrWrongSockType = errors.New("Wrong socket type")
    ErrShortResponse = errors.New("Got short response from netlink")
)

// A Route is a subnet associated with the interface to reach it.
type Route struct {
    *net.IPNet
    Iface   *net.Interface
    Default bool
}
1002 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go generated vendored Normal file
File diff suppressed because it is too large.
9 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go generated vendored Normal file
@ -0,0 +1,9 @@
package netlink

import (
    "math/rand"
)

func randIfrDataByte() uint8 {
    return uint8(rand.Intn(255))
}
11 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go generated vendored Normal file
@ -0,0 +1,11 @@
// +build !arm

package netlink

import (
    "math/rand"
)

func randIfrDataByte() int8 {
    return int8(rand.Intn(255))
}
55 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go generated vendored Normal file
@ -0,0 +1,55 @@
package netlink

import (
    "net"
    "testing"
)

func TestCreateBridgeWithMac(t *testing.T) {
    if testing.Short() {
        return
    }

    name := "testbridge"

    if err := CreateBridge(name, true); err != nil {
        t.Fatal(err)
    }

    if _, err := net.InterfaceByName(name); err != nil {
        t.Fatal(err)
    }

    // cleanup and tests

    if err := DeleteBridge(name); err != nil {
        t.Fatal(err)
    }

    if _, err := net.InterfaceByName(name); err == nil {
        t.Fatal("expected error getting interface because bridge was deleted")
    }
}

func TestCreateVethPair(t *testing.T) {
    if testing.Short() {
        return
    }

    var (
        name1 = "veth1"
        name2 = "veth2"
    )

    if err := NetworkCreateVethPair(name1, name2); err != nil {
        t.Fatal(err)
    }

    if _, err := net.InterfaceByName(name1); err != nil {
        t.Fatal(err)
    }

    if _, err := net.InterfaceByName(name2); err != nil {
        t.Fatal(err)
    }
}
76 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go generated vendored Normal file
@ -0,0 +1,76 @@
// +build !linux

package netlink

import (
    "errors"
    "net"
)

var (
    ErrNotImplemented = errors.New("not implemented")
)

func NetworkGetRoutes() ([]Route, error) {
    return nil, ErrNotImplemented
}

func NetworkLinkAdd(name string, linkType string) error {
    return ErrNotImplemented
}

func NetworkLinkUp(iface *net.Interface) error {
    return ErrNotImplemented
}

func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
    return ErrNotImplemented
}

func AddRoute(destination, source, gateway, device string) error {
    return ErrNotImplemented
}

func AddDefaultGw(ip, device string) error {
    return ErrNotImplemented
}

func NetworkSetMTU(iface *net.Interface, mtu int) error {
    return ErrNotImplemented
}

func NetworkCreateVethPair(name1, name2 string) error {
    return ErrNotImplemented
}

func NetworkChangeName(iface *net.Interface, newName string) error {
    return ErrNotImplemented
}

func NetworkSetNsFd(iface *net.Interface, fd int) error {
    return ErrNotImplemented
}

func NetworkSetNsPid(iface *net.Interface, nspid int) error {
    return ErrNotImplemented
}

func NetworkSetMaster(iface, master *net.Interface) error {
    return ErrNotImplemented
}

func NetworkLinkDown(iface *net.Interface) error {
    return ErrNotImplemented
}

func CreateBridge(name string, setMacAddr bool) error {
    return ErrNotImplemented
}

func DeleteBridge(name string) error {
    return ErrNotImplemented
}

func AddToBridge(iface, master *net.Interface) error {
    return ErrNotImplemented
}
23 Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go generated vendored Normal file
@ -0,0 +1,23 @@
// +build linux

package network

import (
    "fmt"
)

// Loopback is a network strategy that provides a basic loopback device
type Loopback struct {
}

func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error {
    return nil
}

func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error {
    // Do not set the MTU on the loopback interface - use the default.
    if err := InterfaceUp("lo"); err != nil {
        return fmt.Errorf("lo up %s", err)
    }
    return nil
}
37 Godeps/_workspace/src/github.com/docker/libcontainer/network/netns.go generated vendored Normal file
@ -0,0 +1,37 @@
// +build linux

package network

import (
    "fmt"
    "os"
    "syscall"

    "github.com/docker/libcontainer/system"
)

// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace
type NetNS struct {
}

func (v *NetNS) Create(n *Network, nspid int, networkState *NetworkState) error {
    networkState.NsPath = n.NsPath
    return nil
}

func (v *NetNS) Initialize(config *Network, networkState *NetworkState) error {
    if networkState.NsPath == "" {
        return fmt.Errorf("nspath is not specified in NetworkState")
    }

    f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0)
    if err != nil {
        return fmt.Errorf("failed to get network namespace fd: %v", err)
    }

    if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
        return fmt.Errorf("failed to setns current network namespace: %v", err)
    }

    return nil
}
81 Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go generated vendored Normal file
@ -0,0 +1,81 @@
// +build linux

package network

import (
    "net"

    "github.com/docker/libcontainer/netlink"
)

func InterfaceUp(name string) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    return netlink.NetworkLinkUp(iface)
}

func InterfaceDown(name string) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    return netlink.NetworkLinkDown(iface)
}

func ChangeInterfaceName(old, newName string) error {
    iface, err := net.InterfaceByName(old)
    if err != nil {
        return err
    }
    return netlink.NetworkChangeName(iface, newName)
}

func CreateVethPair(name1, name2 string) error {
    return netlink.NetworkCreateVethPair(name1, name2)
}

func SetInterfaceInNamespacePid(name string, nsPid int) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    return netlink.NetworkSetNsPid(iface, nsPid)
}

func SetInterfaceMaster(name, master string) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    masterIface, err := net.InterfaceByName(master)
    if err != nil {
        return err
    }
    return netlink.AddToBridge(iface, masterIface)
}

func SetDefaultGateway(ip, ifaceName string) error {
    return netlink.AddDefaultGw(ip, ifaceName)
}

func SetInterfaceIp(name string, rawIp string) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    ip, ipNet, err := net.ParseCIDR(rawIp)
    if err != nil {
        return err
    }
    return netlink.NetworkLinkAddIp(iface, ip, ipNet)
}

func SetMtu(name string, mtu int) error {
    iface, err := net.InterfaceByName(name)
    if err != nil {
        return err
    }
    return netlink.NetworkSetMTU(iface, mtu)
}
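These helpers are the building blocks the veth strategy uses inside the container to rename its interface to eth0 and configure it. The sequence below is only a rough sketch of that flow, with made-up interface names and addresses rather than values taken from this commit.

package main

import (
    "log"

    "github.com/docker/libcontainer/network"
)

func main() {
    // Illustrative values only: the real strategy gets these from the
    // container's Network config.
    const (
        tempName = "veth1234"
        address  = "172.17.0.2/16"
        gateway  = "172.17.42.1"
        mtu      = 1500
    )

    // Rename the container-side veth to eth0, then assign its address,
    // MTU, bring it up, and install the default route.
    if err := network.ChangeInterfaceName(tempName, "eth0"); err != nil {
        log.Fatal(err)
    }
    if err := network.SetInterfaceIp("eth0", address); err != nil {
        log.Fatal(err)
    }
    if err := network.SetMtu("eth0", mtu); err != nil {
        log.Fatal(err)
    }
    if err := network.InterfaceUp("eth0"); err != nil {
        log.Fatal(err)
    }
    if err := network.SetDefaultGateway(gateway, "eth0"); err != nil {
        log.Fatal(err)
    }
}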
69 Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go generated vendored Normal file
@ -0,0 +1,69 @@
package network

import (
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
    "strings"
)

type NetworkStats struct {
    RxBytes   uint64 `json:"rx_bytes"`
    RxPackets uint64 `json:"rx_packets"`
    RxErrors  uint64 `json:"rx_errors"`
    RxDropped uint64 `json:"rx_dropped"`
    TxBytes   uint64 `json:"tx_bytes"`
    TxPackets uint64 `json:"tx_packets"`
    TxErrors  uint64 `json:"tx_errors"`
    TxDropped uint64 `json:"tx_dropped"`
}

// Returns the network statistics for the network interfaces represented by the given NetworkState.
func GetStats(networkState *NetworkState) (*NetworkStats, error) {
    // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer.
    if networkState.VethHost == "" {
        return &NetworkStats{}, nil
    }
    data, err := readSysfsNetworkStats(networkState.VethHost)
    if err != nil {
        return nil, err
    }

    // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
    return &NetworkStats{
        RxBytes:   data["tx_bytes"],
        RxPackets: data["tx_packets"],
        RxErrors:  data["tx_errors"],
        RxDropped: data["tx_dropped"],
        TxBytes:   data["rx_bytes"],
        TxPackets: data["rx_packets"],
        TxErrors:  data["rx_errors"],
        TxDropped: data["rx_dropped"],
    }, nil
}

// Reads all the statistics available under /sys/class/net/<EthInterface>/statistics as a map with file name as key and data as integers.
func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) {
    out := make(map[string]uint64)

    fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/")
    err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error {
        // skip fullPath.
        if path == fullPath {
            return nil
        }
        base := filepath.Base(path)
        data, err := ioutil.ReadFile(path)
        if err != nil {
            return err
        }
        value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
        if err != nil {
            return err
        }
        out[base] = value
        return nil
    })
    return out, err
}
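As a quick illustration of the rx/tx swap above: reading stats through the host-side veth and reporting them from the container's point of view might look like the following sketch (the veth name is a made-up assumption; in practice it comes from the recorded NetworkState).

package main

import (
    "fmt"
    "log"

    "github.com/docker/libcontainer/network"
)

func main() {
    // The host-side veth name here is illustrative; GetStats returns empty
    // stats when it is unset.
    state := &network.NetworkState{VethHost: "veth1234"}
    stats, err := network.GetStats(state)
    if err != nil {
        log.Fatal(err)
    }
    // RxBytes reports bytes received by the container, i.e. bytes
    // transmitted by the host-side veth, and vice versa for TxBytes.
    fmt.Printf("container rx=%d tx=%d\n", stats.RxBytes, stats.TxBytes)
}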
35 Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go generated vendored Normal file
@ -0,0 +1,35 @@
// +build linux

package network

import (
    "errors"
)

var (
    ErrNotValidStrategyType = errors.New("not a valid network strategy type")
)

var strategies = map[string]NetworkStrategy{
    "veth":     &Veth{},
    "loopback": &Loopback{},
    "netns":    &NetNS{},
}

// NetworkStrategy represents a specific network configuration for
// a container's networking stack
type NetworkStrategy interface {
    Create(*Network, int, *NetworkState) error
    Initialize(*Network, *NetworkState) error
}

// GetStrategy returns the specific network strategy for the
// provided type. If no strategy is registered for the type an
// ErrNotValidStrategyType is returned.
func GetStrategy(tpe string) (NetworkStrategy, error) {
    s, exists := strategies[tpe]
    if !exists {
        return nil, ErrNotValidStrategyType
    }
    return s, nil
}
Some files were not shown because too many files have changed in this diff.