From e6d3895ea2fdb9cb7184f5f72fa3d1f6492dc00a Mon Sep 17 00:00:00 2001
From: Kelsey Hightower
Date: Sun, 27 Jul 2014 12:48:16 -0700
Subject: [PATCH] manage dependencies with godep

This commit adds all cadvisor dependencies as the result of running the
following commands:

    go get -u
    godep save

The .travis.yml file has been updated to use godep for building binaries
and running tests. The prepare.sh script has been updated to use godep to
build the cadvisor binary.
---
 .travis.yml | 6 +-
 Godeps/Godeps.json | 47 +
 Godeps/Readme | 5 +
 Godeps/_workspace/.gitignore | 2 +
 .../docker/docker/pkg/mount/MAINTAINERS | 1 +
 .../docker/docker/pkg/mount/flags.go | 62 +
 .../docker/docker/pkg/mount/flags_freebsd.go | 28 +
 .../docker/docker/pkg/mount/flags_linux.go | 25 +
 .../docker/pkg/mount/flags_unsupported.go | 22 +
 .../docker/docker/pkg/mount/mount.go | 70 +
 .../docker/docker/pkg/mount/mount_test.go | 137 +
 .../docker/pkg/mount/mounter_freebsd.go | 59 +
 .../docker/docker/pkg/mount/mounter_linux.go | 23 +
 .../docker/pkg/mount/mounter_unsupported.go | 11 +
 .../docker/docker/pkg/mount/mountinfo.go | 7 +
 .../docker/pkg/mount/mountinfo_freebsd.go | 38 +
 .../docker/pkg/mount/mountinfo_linux.go | 73 +
 .../docker/pkg/mount/mountinfo_test_linux.go | 445 +++
 .../docker/pkg/mount/mountinfo_unsupported.go | 12 +
 .../docker/libcontainer/.travis.yml | 34 +
 .../docker/libcontainer/CONTRIBUTORS_GUIDE.md | 204 ++
 .../github.com/docker/libcontainer/Dockerfile | 21 +
 .../github.com/docker/libcontainer/LICENSE | 191 ++
 .../docker/libcontainer/MAINTAINERS | 4 +
 .../docker/libcontainer/MAINTAINERS_GUIDE.md | 99 +
 .../github.com/docker/libcontainer/Makefile | 10 +
 .../src/github.com/docker/libcontainer/NOTICE | 16 +
 .../docker/libcontainer/PRINCIPLES.md | 19 +
 .../github.com/docker/libcontainer/README.md | 62 +
 .../github.com/docker/libcontainer/ROADMAP.md | 16 +
 .../docker/libcontainer/api_temp.go | 34 +
 .../docker/libcontainer/apparmor/apparmor.go | 35 +
 .../apparmor/apparmor_disabled.go | 11 +
 .../docker/libcontainer/apparmor/gen.go | 94 +
 .../docker/libcontainer/apparmor/setup.go | 44 +
 .../docker/libcontainer/cgroups/cgroups.go | 40 +
 .../libcontainer/cgroups/cgroups_test.go | 27 +
 .../libcontainer/cgroups/cgutil/cgutil.go | 264 ++
 .../cgroups/cgutil/sample_cgroup.json | 10 +
 .../libcontainer/cgroups/fs/apply_raw.go | 198 ++
 .../docker/libcontainer/cgroups/fs/blkio.go | 141 +
 .../libcontainer/cgroups/fs/blkio_test.go | 174 +
 .../docker/libcontainer/cgroups/fs/cpu.go | 72 +
 .../libcontainer/cgroups/fs/cpu_test.go | 66 +
 .../docker/libcontainer/cgroups/fs/cpuacct.go | 163 +
 .../docker/libcontainer/cgroups/fs/cpuset.go | 110 +
 .../docker/libcontainer/cgroups/fs/devices.go | 34 +
 .../docker/libcontainer/cgroups/fs/freezer.go | 50 +
 .../docker/libcontainer/cgroups/fs/memory.go | 92 +
 .../libcontainer/cgroups/fs/memory_test.go | 127 +
 .../libcontainer/cgroups/fs/notify_linux.go | 82 +
 .../cgroups/fs/notify_linux_test.go | 86 +
 .../libcontainer/cgroups/fs/perf_event.go | 24 +
 .../cgroups/fs/stats_test_util.go | 73 +
 .../libcontainer/cgroups/fs/test_util.go | 60 +
 .../docker/libcontainer/cgroups/fs/utils.go | 40 +
 .../libcontainer/cgroups/fs/utils_test.go | 68 +
 .../docker/libcontainer/cgroups/stats.go | 67 +
 .../cgroups/systemd/apply_nosystemd.go | 29 +
 .../cgroups/systemd/apply_systemd.go | 439 +++
 .../docker/libcontainer/cgroups/utils.go | 168 +
 .../github.com/docker/libcontainer/config.go | 86 +
 .../docker/libcontainer/config_test.go | 160 +
 .../docker/libcontainer/console/console.go | 128 +
.../docker/libcontainer/container.go | 71 + .../docker/libcontainer/devices/defaults.go | 159 + .../docker/libcontainer/devices/devices.go | 119 + .../docker/libcontainer/devices/number.go | 26 + .../github.com/docker/libcontainer/factory.go | 25 + .../docker/libcontainer/label/label.go | 34 + .../libcontainer/label/label_selinux.go | 100 + .../docker/libcontainer/mount/init.go | 244 ++ .../docker/libcontainer/mount/msmoveroot.go | 20 + .../docker/libcontainer/mount/nodes/nodes.go | 52 + .../mount/nodes/nodes_unsupported.go | 13 + .../docker/libcontainer/mount/pivotroot.go | 34 + .../docker/libcontainer/mount/ptmx.go | 30 + .../docker/libcontainer/mount/readonly.go | 11 + .../docker/libcontainer/mount/remount.go | 31 + .../docker/libcontainer/mount/types.go | 49 + .../docker/libcontainer/namespaces/create.go | 10 + .../docker/libcontainer/namespaces/exec.go | 194 ++ .../docker/libcontainer/namespaces/execin.go | 118 + .../docker/libcontainer/namespaces/init.go | 257 ++ .../docker/libcontainer/namespaces/nsenter.go | 223 ++ .../docker/libcontainer/namespaces/types.go | 50 + .../libcontainer/namespaces/types_linux.go | 16 + .../libcontainer/namespaces/types_test.go | 30 + .../docker/libcontainer/netlink/MAINTAINERS | 2 + .../docker/libcontainer/netlink/netlink.go | 23 + .../libcontainer/netlink/netlink_linux.go | 1002 ++++++ .../libcontainer/netlink/netlink_linux_arm.go | 9 + .../netlink/netlink_linux_notarm.go | 11 + .../netlink/netlink_linux_test.go | 55 + .../netlink/netlink_unsupported.go | 76 + .../docker/libcontainer/network/loopback.go | 23 + .../docker/libcontainer/network/netns.go | 37 + .../docker/libcontainer/network/network.go | 81 + .../docker/libcontainer/network/stats.go | 69 + .../docker/libcontainer/network/strategy.go | 35 + .../docker/libcontainer/network/types.go | 41 + .../docker/libcontainer/network/veth.go | 95 + .../docker/libcontainer/nsinit/cli.go | 42 + .../docker/libcontainer/nsinit/config.go | 29 + .../docker/libcontainer/nsinit/exec.go | 186 ++ .../docker/libcontainer/nsinit/init.go | 49 + .../docker/libcontainer/nsinit/nsenter.go | 41 + .../libcontainer/nsinit/nsinit/nsinit.go | 7 + .../docker/libcontainer/nsinit/pause.go | 49 + .../docker/libcontainer/nsinit/stats.go | 39 + .../docker/libcontainer/nsinit/utils.go | 46 + .../github.com/docker/libcontainer/process.go | 27 + .../libcontainer/sample_configs/README.md | 5 + .../libcontainer/sample_configs/apparmor.json | 196 ++ .../sample_configs/attach_to_bridge.json | 202 ++ .../libcontainer/sample_configs/minimal.json | 195 ++ .../libcontainer/sample_configs/selinux.json | 197 ++ .../security/capabilities/capabilities.go | 56 + .../security/capabilities/types.go | 88 + .../security/capabilities/types_test.go | 19 + .../security/restrict/restrict.go | 53 + .../security/restrict/unsupported.go | 9 + .../docker/libcontainer/selinux/selinux.go | 442 +++ .../libcontainer/selinux/selinux_test.go | 64 + .../github.com/docker/libcontainer/state.go | 74 + .../docker/libcontainer/syncpipe/sync_pipe.go | 102 + .../libcontainer/syncpipe/sync_pipe_linux.go | 20 + .../libcontainer/syncpipe/sync_pipe_test.go | 61 + .../docker/libcontainer/system/linux.go | 60 + .../docker/libcontainer/system/proc.go | 27 + .../docker/libcontainer/system/setns_linux.go | 31 + .../docker/libcontainer/system/sysconfig.go | 12 + .../libcontainer/system/sysconfig_notcgo.go | 8 + .../libcontainer/system/xattrs_linux.go | 59 + .../github.com/docker/libcontainer/types.go | 11 + .../docker/libcontainer/utils/utils.go | 55 + 
.../fsouza/go-dockerclient/.travis.yml | 13 + .../github.com/fsouza/go-dockerclient/AUTHORS | 33 + .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 + .../github.com/fsouza/go-dockerclient/LICENSE | 22 + .../fsouza/go-dockerclient/README.markdown | 41 + .../fsouza/go-dockerclient/change.go | 36 + .../fsouza/go-dockerclient/change_test.go | 26 + .../fsouza/go-dockerclient/client.go | 568 ++++ .../fsouza/go-dockerclient/client_test.go | 285 ++ .../fsouza/go-dockerclient/container.go | 633 ++++ .../fsouza/go-dockerclient/container_test.go | 1281 ++++++++ .../fsouza/go-dockerclient/engine/engine.go | 147 + .../go-dockerclient/engine/engine_test.go | 111 + .../fsouza/go-dockerclient/engine/env.go | 238 ++ .../fsouza/go-dockerclient/engine/env_test.go | 127 + .../fsouza/go-dockerclient/engine/hack.go | 25 + .../go-dockerclient/engine/helpers_test.go | 28 + .../fsouza/go-dockerclient/engine/http.go | 44 + .../fsouza/go-dockerclient/engine/job.go | 197 ++ .../fsouza/go-dockerclient/engine/job_test.go | 84 + .../fsouza/go-dockerclient/engine/streams.go | 196 ++ .../go-dockerclient/engine/streams_test.go | 298 ++ .../fsouza/go-dockerclient/event.go | 278 ++ .../fsouza/go-dockerclient/event_test.go | 93 + .../fsouza/go-dockerclient/example_test.go | 134 + .../fsouza/go-dockerclient/image.go | 322 ++ .../fsouza/go-dockerclient/image_test.go | 642 ++++ .../github.com/fsouza/go-dockerclient/misc.go | 47 + .../fsouza/go-dockerclient/misc_test.go | 123 + .../fsouza/go-dockerclient/signal.go | 49 + .../go-dockerclient/testing/data/Dockerfile | 15 + .../testing/data/container.tar | Bin 0 -> 2048 bytes .../testing/data/dockerfile.tar | Bin 0 -> 2560 bytes .../fsouza/go-dockerclient/testing/server.go | 651 ++++ .../go-dockerclient/testing/server_test.go | 963 ++++++ .../fsouza/go-dockerclient/utils/random.go | 20 + .../fsouza/go-dockerclient/utils/stdcopy.go | 168 + .../go-dockerclient/utils/stdcopy_test.go | 217 ++ .../go-dockerclient/utils/uname_darwin.go | 17 + .../go-dockerclient/utils/uname_linux.go | 20 + .../fsouza/go-dockerclient/utils/utils.go | 1114 +++++++ .../go-dockerclient/utils/utils_test.go | 535 +++ .../go-dockerclient/utils/utils_windows.go | 17 + .../influxdb/influxdb/client/README.md | 2 + .../influxdb/influxdb/client/influxdb.go | 610 ++++ .../influxdb/influxdb/client/influxdb_test.go | 190 ++ .../influxdb/influxdb/client/series.go | 19 + .../influxdb/influxdb/client/shard_space.go | 15 + .../src/github.com/kr/pretty/.gitignore | 4 + .../src/github.com/kr/pretty/License | 21 + .../src/github.com/kr/pretty/Readme | 9 + .../src/github.com/kr/pretty/diff.go | 148 + .../src/github.com/kr/pretty/diff_test.go | 73 + .../src/github.com/kr/pretty/example_test.go | 20 + .../src/github.com/kr/pretty/formatter.go | 309 ++ .../github.com/kr/pretty/formatter_test.go | 148 + .../src/github.com/kr/pretty/pretty.go | 98 + .../src/github.com/kr/pretty/zero.go | 41 + .../_workspace/src/github.com/kr/text/License | 19 + .../_workspace/src/github.com/kr/text/Readme | 3 + .../src/github.com/kr/text/colwriter/Readme | 5 + .../github.com/kr/text/colwriter/column.go | 147 + .../kr/text/colwriter/column_test.go | 90 + .../_workspace/src/github.com/kr/text/doc.go | 3 + .../src/github.com/kr/text/indent.go | 74 + .../src/github.com/kr/text/indent_test.go | 119 + .../src/github.com/kr/text/mc/Readme | 9 + .../src/github.com/kr/text/mc/mc.go | 62 + .../_workspace/src/github.com/kr/text/wrap.go | 86 + .../src/github.com/kr/text/wrap_test.go | 44 + .../src/github.com/stretchr/objx/.gitignore | 22 + 
.../src/github.com/stretchr/objx/LICENSE.md | 23 + .../src/github.com/stretchr/objx/README.md | 3 + .../src/github.com/stretchr/objx/accessors.go | 179 + .../stretchr/objx/accessors_test.go | 145 + .../stretchr/objx/codegen/array-access.txt | 14 + .../stretchr/objx/codegen/index.html | 86 + .../stretchr/objx/codegen/template.txt | 286 ++ .../stretchr/objx/codegen/types_list.txt | 20 + .../src/github.com/stretchr/objx/constants.go | 13 + .../github.com/stretchr/objx/conversions.go | 117 + .../stretchr/objx/conversions_test.go | 94 + .../src/github.com/stretchr/objx/doc.go | 72 + .../github.com/stretchr/objx/fixture_test.go | 98 + .../src/github.com/stretchr/objx/map.go | 222 ++ .../github.com/stretchr/objx/map_for_test.go | 10 + .../src/github.com/stretchr/objx/map_test.go | 147 + .../src/github.com/stretchr/objx/mutations.go | 81 + .../stretchr/objx/mutations_test.go | 77 + .../src/github.com/stretchr/objx/security.go | 14 + .../github.com/stretchr/objx/security_test.go | 12 + .../stretchr/objx/simple_example_test.go | 41 + .../src/github.com/stretchr/objx/tests.go | 17 + .../github.com/stretchr/objx/tests_test.go | 24 + .../stretchr/objx/type_specific_codegen.go | 2881 +++++++++++++++++ .../objx/type_specific_codegen_test.go | 2867 ++++++++++++++++ .../src/github.com/stretchr/objx/value.go | 13 + .../github.com/stretchr/objx/value_test.go | 1 + .../stretchr/testify/assert/assertions.go | 657 ++++ .../testify/assert/assertions_test.go | 538 +++ .../github.com/stretchr/testify/assert/doc.go | 150 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 232 ++ .../testify/assert/forward_assertions_test.go | 380 +++ .../github.com/stretchr/testify/mock/doc.go | 43 + .../github.com/stretchr/testify/mock/mock.go | 505 +++ .../stretchr/testify/mock/mock_test.go | 669 ++++ deploy/prepare.sh | 2 +- 244 files changed, 34075 insertions(+), 3 deletions(-) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_test_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/.travis.yml create mode 100644 
Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/Makefile create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/test_util.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go create mode 100644 
Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/config.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/container.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/factory.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/mount/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go create mode 100644 
Godeps/_workspace/src/github.com/docker/libcontainer/network/netns.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/cli.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsinit/nsinit.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/process.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/state.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go 
create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/types.go create mode 100644 Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/hack.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/helpers_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/http.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/random.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy.go create mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_darwin.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_linux.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_windows.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/.gitignore create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/License create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/Readme create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/diff.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/diff_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/example_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/formatter.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/pretty.go create mode 100644 Godeps/_workspace/src/github.com/kr/pretty/zero.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/License create mode 100644 Godeps/_workspace/src/github.com/kr/text/Readme create mode 100644 Godeps/_workspace/src/github.com/kr/text/colwriter/Readme create mode 100644 Godeps/_workspace/src/github.com/kr/text/colwriter/column.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/colwriter/column_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/doc.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/indent.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/indent_test.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/mc/Readme create mode 100644 Godeps/_workspace/src/github.com/kr/text/mc/mc.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/wrap.go create mode 100644 Godeps/_workspace/src/github.com/kr/text/wrap_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/.gitignore create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/README.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/constants.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions.go create mode 100644 
Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go
diff --git a/.travis.yml b/.travis.yml
index 8b47c7d0..a216b540 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,10 +5,12 @@ before_script:
   - go get github.com/stretchr/testify/mock
   - go get github.com/kr/pretty
   - go get code.google.com/p/go.tools/cmd/vet
+  - go get github.com/kr/godep
   - wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
   - sudo dpkg -i influxdb_latest_amd64.deb
   - sudo service influxdb start
 script:
-  - go test -v -race github.com/google/cadvisor/...
-  - go build github.com/google/cadvisor
+  - export PATH=$PATH:$HOME/gopath/bin
+  - godep go test -v -race github.com/google/cadvisor/...
+  - godep go build github.com/google/cadvisor
   - go vet github.com/google/cadvisor
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 00000000..c9522544
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,47 @@
+{
+  "ImportPath": "github.com/google/cadvisor",
+  "GoVersion": "go1.3",
+  "Deps": [
+    {
+      "ImportPath": "github.com/docker/docker/pkg/mount",
+      "Comment": "v1.1.1-402-g487a417",
+      "Rev": "487a417d9fd074d0e78876072c7d1ebfd398ea7a"
+    },
+    {
+      "ImportPath": "github.com/docker/libcontainer",
+      "Comment": "v1.1.0-115-ge6a43c1",
+      "Rev": "e6a43c1c2b9f769deb96348a0a93417cd48a36d8"
+    },
+    {
+      "ImportPath": "github.com/fsouza/go-dockerclient",
+      "Comment": "0.2.1-185-g4fc0e0d",
+      "Rev": "4fc0e0dbba85f2f799aa77cea594d40267f5bd5a"
+    },
+    {
+      "ImportPath": "github.com/influxdb/influxdb/client",
+      "Comment": "v0.8.0-rc.3-9-g3284662",
+      "Rev": "3284662b350688b651359f9124928856071bd3f5"
+    },
+    {
+      "ImportPath": "github.com/kr/pretty",
+      "Comment": "go.weekly.2011-12-22-20-g088c856",
+      "Rev": "088c856450c08c03eb32f7a6c221e6eefaa10e6f"
+    },
+    {
+      "ImportPath": "github.com/kr/text",
+      "Rev": "6807e777504f54ad073ecef66747de158294b639"
+    },
+    {
+      "ImportPath": "github.com/stretchr/objx",
+      "Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
+    },
+    {
+      "ImportPath": "github.com/stretchr/testify/assert",
+      "Rev": "ed6ea184117f7ae3168d1c05dbdbde1a86e2be68"
+    },
+    {
+      "ImportPath": "github.com/stretchr/testify/mock",
+      "Rev": "ed6ea184117f7ae3168d1c05dbdbde1a86e2be68"
+    }
+  ]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 00000000..4cdaa53d
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
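For reference, a rough sketch of the godep workflow this change depends on, assuming $GOPATH/bin (on Travis, $HOME/gopath/bin as exported above) is on the PATH; only commands already named in the commit message and .travis.yml are shown, and the comments are explanatory rather than part of the change:

    go get github.com/kr/godep    # install the godep tool
    go get -u                     # refresh dependencies into GOPATH
    godep save                    # record revisions in Godeps/Godeps.json and copy sources into Godeps/_workspace
    godep go build github.com/google/cadvisor               # build against the vendored copies
    godep go test -v -race github.com/google/cadvisor/...   # test against the vendored copies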
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 00000000..f037d684 --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS new file mode 100644 index 00000000..1e998f8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 00000000..742698e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,62 @@ +package mount + +import ( + "strings" +) + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + flags := map[string]struct { + clear bool + flag int + }{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "private": {false, PRIVATE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, + } + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000..4ddf4d70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,28 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + RDONLY = C.MNT_RDONLY + NOSUID = C.MNT_NOSUID + NOEXEC = C.MNT_NOEXEC + SYNCHRONOUS = C.MNT_SYNCHRONOUS + NOATIME = C.MNT_NOATIME + + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + PRIVATE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 00000000..19c882fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,25 @@ +// +build amd64 + +package mount + +import ( + "syscall" +) + +const ( + RDONLY = syscall.MS_RDONLY + NOSUID = syscall.MS_NOSUID + NODEV = syscall.MS_NODEV + NOEXEC = syscall.MS_NOEXEC + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + DIRSYNC = 
syscall.MS_DIRSYNC + REMOUNT = syscall.MS_REMOUNT + MANDLOCK = syscall.MS_MANDLOCK + NOATIME = syscall.MS_NOATIME + NODIRATIME = syscall.MS_NODIRATIME + BIND = syscall.MS_BIND + RBIND = syscall.MS_BIND | syscall.MS_REC + PRIVATE = syscall.MS_PRIVATE + RELATIME = syscall.MS_RELATIME + STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000..e5983541 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,22 @@ +// +build !linux,!freebsd linux,!amd64 freebsd,!cgo + +package mount + +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + PRIVATE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 00000000..5ca73160 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,70 @@ +package mount + +import ( + "time" +) + +func GetMounts() ([]*MountInfo, error) { + return parseMountTable() +} + +// Looks at /proc/self/mountinfo to determine of the specified +// mountpoint has been mounted +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount the specified options at the target path only if +// the target is not mounted +// Options must be specified as fstab style +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// Mount the specified options at the target path +// reguardless if the target is mounted or not +// Options must be specified as fstab style +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount the target only if it is mounted +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// Unmount the target reguardless if it is mounted or not +func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go new file mode 100644 index 00000000..5c7f1b86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go @@ -0,0 +1,137 @@ +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10 got 
%s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 00000000..bb870e6f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) 
+ for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 00000000..70b7798d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,23 @@ +// +build amd64 + +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000..06f2ac00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd linux,!amd64 freebsd,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 00000000..78b83ced --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,7 @@ +package mount + +type MountInfo struct { + Id, Parent, Major, Minor int + Root, Mountpoint, Opts string + Fstype, Source, VfsOpts string +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 00000000..a16bdb84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,38 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*MountInfo + for _, entry := range entries { + var mountinfo MountInfo + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000..01c954ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,73 @@ +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s " +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*MountInfo, error) { + var ( + s = bufio.NewScanner(r) + out = []*MountInfo{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &MountInfo{} + text = s.Text() + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.Id, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) != 3 { + return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text) + } + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = postSeparatorFields[2] + out = append(out, p) + } + return out, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_test_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_test_linux.go new file mode 100644 index 00000000..f2e3daa8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_test_linux.go @@ -0,0 +1,445 @@ +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config 
rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / 
/var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev 
rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / 
/var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / 
/var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / 
/var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 
/var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs 
none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 
0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / 
/var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / 
/var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / 
/var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / 
/var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / 
/var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / 
/var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000..352336b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*MountInfo, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/.travis.yml b/Godeps/_workspace/src/github.com/docker/libcontainer/.travis.yml new file mode 100644 index 00000000..7040d0bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/.travis.yml @@ -0,0 +1,34 @@ +language: go +go: 1.3 + +# let us have pretty experimental Docker-based Travis workers +sudo: false + +env: + - TRAVIS_GLOBAL_WTF=1 + - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1 + - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0 +# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061) + - _GOOS=linux _GOARCH=386 CGO_ENABLED=0 + - _GOOS=linux _GOARCH=arm CGO_ENABLED=0 + +install: + - mkdir -pv 
"${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer" + - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then + gvm cross "$_GOOS" "$_GOARCH"; + export GOOS="$_GOOS" GOARCH="$_GOARCH"; + fi + - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi + - go get -d -v ./... + - if [ "$TRAVIS_GLOBAL_WTF" ]; then + export DOCKER_PATH="${GOPATH%%:*}/src/github.com/dotcloud/docker"; + mkdir -p "$DOCKER_PATH/hack/make"; + ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/dotcloud/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} ); + sed -i 's!dotcloud/docker!docker/libcontainer!' "$DOCKER_PATH/hack/make/.validate"; + fi + +script: + - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi + - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi + - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go build -v ./...; fi + - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then go test -test.short -v ./...; fi diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md b/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md new file mode 100644 index 00000000..f0268962 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md @@ -0,0 +1,204 @@ +# The libcontainer Contributors' Guide + +Want to hack on libcontainer? Awesome! Here are instructions to get you +started. They are probably not perfect, please let us know if anything +feels wrong or incomplete. + +## Reporting Issues + +When reporting [issues](https://github.com/docker/libcontainer/issues) +on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), +the output of `uname -a`. Please include the steps required to reproduce +the problem if possible and applicable. +This information will help us review and fix your issue faster. + +## Development Environment + +*Add instructions on setting up the development environment.* + +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep libcontainer lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* libcontainer. + +### Discuss your design on the mailing list + +We recommend discussing your plans [on the mailing +list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer) +before starting to code - especially for more ambitious contributions. +This gives other contributors a chance to point you in the right +direction, give feedback on your design, and maybe point out if someone +else is working on the same thing. + +### Create issues... + +Any significant improvement should be documented as [a GitHub +issue](https://github.com/docker/libcontainer/issues) before anybody +starts working on it. + +### ...but check for existing issues first! 
+
+Please take a moment to check that an issue doesn't already exist
+documenting your bug report or improvement proposal. If it does, it
+never hurts to add a quick "+1" or "I have this problem too". This will
+help prioritize the most common problems and requests.
+
+### Conventions
+
+Fork the repo and make changes on your fork in a feature branch:
+
+- If it's a bugfix branch, name it XXX-something where XXX is the number of the
+  issue
+- If it's a feature branch, create an enhancement issue to announce your
+  intentions, and name it XXX-something where XXX is the number of the issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. Run the full test suite on
+your branch before submitting a pull request.
+
+Update the documentation when creating or modifying features. Test
+your documentation changes for clarity, concision, and correctness, as
+well as a clean documentation build. See ``docs/README.md`` for more
+information on building the docs and how docs get released.
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plugins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a
+reference to all the issues that they address.
+
+Pull requests must not contain commits from other users or branches.
+
+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Be
+sure to post a comment after pushing. The new commits will show up in the pull
+request automatically, but the reviewers will not be notified unless you
+comment.
+
+Before the pull request is merged, make sure that you squash your commits into
+logical units of work using `git rebase -i` and `git push -f`. After every
+commit the test suite should be passing. Include documentation changes in the
+same commit so that a revert would remove all traces of the feature or fix.
+
+Commits that fix or close an issue should include a reference like `Closes #XXX`
+or `Fixes #XXX`, which will automatically close the issue when merged.
+
+### Testing
+
+Make sure you include suitable tests, preferably unit tests, in your pull request
+and that all the tests pass.
+
+*Instructions for running tests to be added.*
+
+### Merge approval
+
+libcontainer maintainers use LGTM (looks good to me) in comments on the code review
+to indicate acceptance.
+
+A change requires LGTMs from at least two maintainers. One of those must come from
+a maintainer of the component affected. For example, if a change affects `netlink/`
+and `security`, it needs at least one LGTM from a maintainer of each. Maintainers
+only need one LGTM as presumably they LGTM their own change.
+
+For more details see [MAINTAINERS.md](MAINTAINERS.md)
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle)
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+One way to automate this is to customise your git ``commit.template`` by adding
+a ``prepare-commit-msg`` hook to your libcontainer checkout:
+
+```
+curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
+```
+
+* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
+
+#### Small patch exception
+
+There are several exceptions to the signing requirement. Currently these are:
+
+* Your patch fixes spelling or grammar errors.
+* Your patch is a single line change to documentation contained in the
+  `docs` directory.
+* Your patch fixes Markdown formatting or syntax errors in the
+  documentation contained in the `docs` directory.
+
+If you have any questions, please refer to the FAQ in the [docs](to be written)
+
+### How can I become a maintainer?
+
+* Step 1: learn the component inside out
+* Step 2: make yourself useful by contributing code, bugfixes, support etc.
+* Step 3: volunteer on the irc channel (#libcontainer@freenode)
+
+Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
+You don't have to be a maintainer to make a difference on the project!
+ diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile b/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile new file mode 100644 index 00000000..1fbba3e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile @@ -0,0 +1,21 @@ +FROM crosbymichael/golang + +RUN apt-get update && apt-get install -y gcc +RUN go get code.google.com/p/go.tools/cmd/cover + +# setup a playground for us to spawn containers in +RUN mkdir /busybox && \ + curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox + +RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \ + chmod +x /dind + +COPY . /go/src/github.com/docker/libcontainer +WORKDIR /go/src/github.com/docker/libcontainer +RUN cp sample_configs/minimal.json /busybox/container.json + +RUN go get -d -v ./... +RUN go install -v ./... + +ENTRYPOINT ["/dind"] +CMD ["go", "test", "-cover", "./..."] diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE b/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS new file mode 100644 index 00000000..8c36d096 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS @@ -0,0 +1,4 @@ +Michael Crosby (@crosbymichael) +Rohit Jnagal (@rjnagal) +Victor Marmol (@vmarmol) +.travis.yml: Tianon Gravi (@tianon) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md new file mode 100644 index 00000000..2ac9ca21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md @@ -0,0 +1,99 @@ +# The libcontainer Maintainers' Guide + +## Introduction + +Dear maintainer. Thank you for investing the time and energy to help +make libcontainer as useful as possible. Maintaining a project is difficult, +sometimes unrewarding work. Sure, you will get to contribute cool +features to the project. But most of your time will be spent reviewing, +cleaning up, documenting, answering questions, justifying design +decisions - while everyone has all the fun! But remember - the quality +of the maintainers work is what distinguishes the good projects from the +great. So please be proud of your work, even the unglamourous parts, +and encourage a culture of appreciation and respect for *every* aspect +of improving the project - not just the hot new features. + +This document is a manual for maintainers old and new. 
It explains what
+is expected of maintainers, how they should work, and what tools are
+available to them.
+
+This is a living document - if you see something out of date or missing,
+speak up!
+
+## What are a maintainer's responsibilities?
+
+It is every maintainer's responsibility to:
+
+* 1) Expose a clear roadmap for improving their component.
+* 2) Deliver prompt feedback and decisions on pull requests.
+* 3) Be available to anyone with questions, bug reports, criticism etc.
+  on their component. This includes IRC, GitHub requests and the mailing
+  list.
+* 4) Make sure their component respects the philosophy, design and
+  roadmap of the project.
+
+## How are decisions made?
+
+Short answer: with pull requests to the libcontainer repository.
+
+libcontainer is an open-source project with an open design philosophy. This
+means that the repository is the source of truth for EVERY aspect of the
+project, including its philosophy, design, roadmap and APIs. *If it's
+part of the project, it's in the repo. If it's in the repo, it's part of
+the project.*
+
+As a result, all decisions can be expressed as changes to the
+repository. An implementation change is a change to the source code. An
+API change is a change to the API specification. A philosophy change is
+a change to the philosophy manifesto. And so on.
+
+All decisions affecting libcontainer, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
+this (see below "Who decides what?")
+
+
+## Who decides what?
+
+All decisions are pull requests, and the relevant maintainers make
+decisions by accepting or refusing the pull request. Review and acceptance
+by anyone is denoted by adding a comment in the pull request: `LGTM`.
+However, only currently listed `MAINTAINERS` are counted towards the required
+two LGTMs.
+
+libcontainer follows the timeless, highly efficient and totally unfair system
+known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL.
+This means that all decisions are made by default by Michael. Since making
+every decision himself would be highly un-scalable, in practice decisions
+are spread across multiple maintainers.
+
+The relevant maintainers for a pull request can be worked out in two steps:
+
+* Step 1: Determine the subdirectories affected by the pull request. This
+  might be `netlink/` and `security/`, or any other part of the repo.
+
+* Step 2: Find the `MAINTAINERS` file which affects this directory. If the
+  directory itself does not have a `MAINTAINERS` file, work your way up
+  the repo hierarchy until you find one.
+
+### I'm a maintainer, and I'm going on holiday
+
+Please let your co-maintainers and other contributors know by raising a pull
+request that comments out your `MAINTAINERS` file entry using a `#`.
+
+### I'm a maintainer, should I make pull requests too?
+
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+
+### Who assigns maintainers?
+
+Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files.
+
+### How is this process changed?
+ +Just like everything else: by making a pull request :) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile b/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile new file mode 100644 index 00000000..843b761e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile @@ -0,0 +1,10 @@ + +all: + docker build -t docker/libcontainer . + +test: + # we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting + docker run --rm --cap-add NET_ADMIN --cap-add SYS_ADMIN docker/libcontainer + +sh: + docker run --rm -ti -w /busybox --rm --cap-add NET_ADMIN --cap-add SYS_ADMIN docker/libcontainer nsinit exec sh diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE b/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE new file mode 100644 index 00000000..ca1635f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE @@ -0,0 +1,16 @@ +libcontainer +Copyright 2012-2014 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md b/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md new file mode 100644 index 00000000..42396c0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md @@ -0,0 +1,19 @@ +# libcontainer Principles + +In the design and development of libcontainer we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between two options, choose the one that is easier to reverse. +* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The fewer moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/README.md new file mode 100644 index 00000000..ee14a57c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/README.md @@ -0,0 +1,62 @@ +## libcontainer - reference implementation for containers + +### Note on API changes: + +Please bear with us while we work on making the libcontainer API stable and something that we can support long term. 
We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API. + +#### Background + +libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. + + +#### Container +A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system. + +libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container. + + +#### Root file system + +A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree. + + +#### Configuration + +A container is initially configured by supplying configuration data when the container is created. + + +#### nsinit + +`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory. + +To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container. + +See the `sample_configs` folder for examples of what the container configuration should look like. + +To execute `/bin/bash` in the current directory as a container just run the following **as root**: +```bash +nsinit exec /bin/bash +``` + +If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed. + +You can identify if a process is running in a container by looking to see if `state.json` is in the root of the directory. + +You may also specify an alternate root place where the `container.json` file is read and where the `state.json` file will be saved. + +#### Future +See the [roadmap](ROADMAP.md). + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. + +## Hacking on libcontainer + +First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md). + +If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md). + +If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and +"How can I become a maintainer?" in the Contributors' Guide. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md new file mode 100644 index 00000000..08deb9ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md @@ -0,0 +1,16 @@ +# libcontainer: what's next? 
+ +This document is a high-level overview of where we want to take libcontainer next. +It is a curated selection of planned improvements which are either important, difficult, or both. + +For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues). + +To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. + +## Broader kernel support + +Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel. + +## Cross-architecture support + +Our goal is to make libcontainer run everywhere. However currently libcontainer only runs on x86_64 systems. We plan on expanding architecture support, so that libcontainer containers can be created and used on more architectures. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go b/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go new file mode 100644 index 00000000..9b2c5207 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go @@ -0,0 +1,34 @@ +/* +Temporary API endpoint for libcontainer while the full API is finalized (api.go). +*/ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + "github.com/docker/libcontainer/network" +) + +// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that. +// DEPRECATED: The below portions are only to be used during the transition to the official API. +// Returns all available stats for the given container. 
+func GetStats(container *Config, state *State) (*ContainerStats, error) { + var ( + err error + stats = &ContainerStats{} + ) + + if systemd.UseSystemd() { + stats.CgroupStats, err = systemd.GetStats(container.Cgroups) + } else { + stats.CgroupStats, err = fs.GetStats(container.Cgroups) + } + + if err != nil { + return stats, err + } + + stats.NetworkStats, err = network.GetStats(&state.NetworkState) + + return stats, err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go new file mode 100644 index 00000000..fb1574df --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go @@ -0,0 +1,35 @@ +// +build apparmor,linux + +package apparmor + +// #cgo LDFLAGS: -lapparmor +// #include +// #include +import "C" +import ( + "io/ioutil" + "os" + "unsafe" +) + +func IsEnabled() bool { + if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { + buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + return err == nil && len(buf) > 1 && buf[0] == 'Y' + } + return false +} + +func ApplyProfile(name string) error { + if name == "" { + return nil + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + if _, err := C.aa_change_onexec(cName); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go new file mode 100644 index 00000000..937bf915 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go @@ -0,0 +1,11 @@ +// +build !apparmor !linux + +package apparmor + +func IsEnabled() bool { + return false +} + +func ApplyProfile(name string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go new file mode 100644 index 00000000..825e646d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go @@ -0,0 +1,94 @@ +package apparmor + +import ( + "io" + "os" + "text/template" +) + +type data struct { + Name string + Imports []string + InnerImports []string +} + +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + mount fstype=tmpfs, + mount fstype=mqueue, + mount fstype=fuse.*, + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + + deny @{PROC}/sys/fs/** wklx, + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, + deny @{PROC}/sys/kernel/*/** wklx, + + deny mount options=(ro, remount) -> /, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, + deny mount fstype=devpts, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny 
/sys/kernel/security/** rwklx, +} +` + +func generateProfile(out io.Writer) error { + compiled, err := template.New("apparmor_profile").Parse(baseTemplate) + if err != nil { + return err + } + data := &data{ + Name: "docker-default", + } + if tuntablesExists() { + data.Imports = append(data.Imports, "#include ") + } else { + data.Imports = append(data.Imports, "@{PROC}=/proc/") + } + if abstrctionsEsists() { + data.InnerImports = append(data.InnerImports, "#include ") + } + if err := compiled.Execute(out, data); err != nil { + return err + } + return nil +} + +// check if the tunables/global exist +func tuntablesExists() bool { + _, err := os.Stat("/etc/apparmor.d/tunables/global") + return err == nil +} + +// check if abstractions/base exist +func abstrctionsEsists() bool { + _, err := os.Stat("/etc/apparmor.d/abstractions/base") + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go new file mode 100644 index 00000000..8ed54374 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go @@ -0,0 +1,44 @@ +package apparmor + +import ( + "fmt" + "os" + "os/exec" + "path" +) + +const ( + DefaultProfilePath = "/etc/apparmor.d/docker" +) + +func InstallDefaultProfile() error { + if !IsEnabled() { + return nil + } + + // Make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { + return err + } + + f, err := os.OpenFile(DefaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := generateProfile(f); err != nil { + f.Close() + return err + } + f.Close() + + cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker") + // to use the parser directly we have to make sure we are in the correct + // dir with the profile + cmd.Dir = "/etc/apparmor.d" + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go new file mode 100644 index 00000000..64ece568 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go @@ -0,0 +1,40 @@ +package cgroups + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +var ( + ErrNotFound = errors.New("mountpoint not found") +) + +type FreezerState string + +const ( + Undefined FreezerState = "" + Frozen FreezerState = "FROZEN" + Thawed FreezerState = "THAWED" +) + +type Cgroup struct { + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` // name of parent cgroup or slice + + AllowAllDevices bool `json:"allow_all_devices,omitempty"` // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. + AllowedDevices []*devices.Device `json:"allowed_devices,omitempty"` + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. 
other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process + Slice string `json:"slice,omitempty"` // Parent slice to use for systemd +} + +type ActiveCgroup interface { + Cleanup() error +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go new file mode 100644 index 00000000..537336a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go @@ -0,0 +1,27 @@ +package cgroups + +import ( + "bytes" + "testing" +) + +const ( + cgroupsContents = `11:hugetlb:/ +10:perf_event:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct,cpu:/ +3:cpuset:/ +2:name=systemd:/user.slice/user-1000.slice/session-16.scope` +) + +func TestParseCgroups(t *testing.T) { + r := bytes.NewBuffer([]byte(cgroupsContents)) + _, err := parseCgroupFile("blkio", r) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go new file mode 100644 index 00000000..f4a541ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go @@ -0,0 +1,264 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "os" + "syscall" + "time" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" +) + +var createCommand = cli.Command{ + Name: "create", + Usage: "Create a cgroup container using the supplied configuration and initial process.", + Flags: []cli.Flag{ + cli.StringFlag{"config, c", "cgroup.json", "path to container configuration (cgroups.Cgroup object)"}, + cli.IntFlag{"pid, p", 0, "pid of the initial process in the container"}, + }, + Action: createAction, +} + +var destroyCommand = cli.Command{ + Name: "destroy", + Usage: "Destroy an existing cgroup container.", + Flags: []cli.Flag{ + cli.StringFlag{"name, n", "", "container name"}, + cli.StringFlag{"parent, p", "", "container parent"}, + }, + Action: destroyAction, +} + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "Get stats for cgroup", + Flags: []cli.Flag{ + cli.StringFlag{"name, n", "", "container name"}, + cli.StringFlag{"parent, p", "", "container parent"}, + }, + Action: statsAction, +} + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "Pause cgroup", + Flags: []cli.Flag{ + cli.StringFlag{"name, n", "", "container name"}, + cli.StringFlag{"parent, p", "", "container parent"}, + }, + Action: pauseAction, +} + +var resumeCommand = cli.Command{ + Name: "resume", + Usage: "Resume a paused cgroup", + Flags: []cli.Flag{ + cli.StringFlag{"name, n", "", "container name"}, + cli.StringFlag{"parent, p", "", "container parent"}, + }, + Action: resumeAction, +} + +var psCommand = cli.Command{ + Name: "ps", + Usage: "Get list of pids for a cgroup", + Flags: []cli.Flag{ + cli.StringFlag{"name, n", "", "container name"}, + cli.StringFlag{"parent, p", "", "container parent"}, + }, + Action: psAction, +} + +func getConfigFromFile(c 
*cli.Context) (*cgroups.Cgroup, error) { + f, err := os.Open(c.String("config")) + if err != nil { + return nil, err + } + defer f.Close() + + var config *cgroups.Cgroup + if err := json.NewDecoder(f).Decode(&config); err != nil { + log.Fatal(err) + } + return config, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + return nil +} + +func getConfig(context *cli.Context) (*cgroups.Cgroup, error) { + name := context.String("name") + if name == "" { + log.Fatal(fmt.Errorf("Missing container name")) + } + parent := context.String("parent") + return &cgroups.Cgroup{ + Name: name, + Parent: parent, + }, nil +} + +func killAll(config *cgroups.Cgroup) { + // We could use freezer here to prevent process spawning while we are trying + // to kill everything. But going with more portable solution of retrying for + // now. + pids := getPids(config) + retry := 10 + for len(pids) != 0 || retry > 0 { + killPids(pids) + time.Sleep(100 * time.Millisecond) + retry-- + pids = getPids(config) + } + if len(pids) != 0 { + log.Fatal(fmt.Errorf("Could not kill existing processes in the container.")) + } +} + +func getPids(config *cgroups.Cgroup) []int { + pids, err := fs.GetPids(config) + if err != nil { + log.Fatal(err) + } + return pids +} + +func killPids(pids []int) { + for _, pid := range pids { + // pids might go away on their own. Ignore errors. + syscall.Kill(pid, syscall.SIGKILL) + } +} + +func setFreezerState(context *cli.Context, state cgroups.FreezerState) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + if systemd.UseSystemd() { + err = systemd.Freeze(config, state) + } else { + err = fs.Freeze(config, state) + } + if err != nil { + log.Fatal(err) + } +} + +func createAction(context *cli.Context) { + config, err := getConfigFromFile(context) + if err != nil { + log.Fatal(err) + } + pid := context.Int("pid") + if pid <= 0 { + log.Fatal(fmt.Errorf("Invalid pid : %d", pid)) + } + if systemd.UseSystemd() { + _, err := systemd.Apply(config, pid) + if err != nil { + log.Fatal(err) + } + } else { + _, err := fs.Apply(config, pid) + if err != nil { + log.Fatal(err) + } + } +} + +func destroyAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + killAll(config) + // Systemd will clean up cgroup state for empty container. 
+ if !systemd.UseSystemd() { + err := fs.Cleanup(config) + if err != nil { + log.Fatal(err) + } + } +} + +func statsAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + stats, err := fs.GetStats(config) + if err != nil { + log.Fatal(err) + } + + out, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Fatal(err) + } + fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out)) +} + +func pauseAction(context *cli.Context) { + setFreezerState(context, cgroups.Frozen) +} + +func resumeAction(context *cli.Context) { + setFreezerState(context, cgroups.Thawed) +} + +func psAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + pids, err := fs.GetPids(config) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Pids in '%s':\n", config.Name) + fmt.Println(pids) +} + +func main() { + logPath := os.Getenv("log") + if logPath != "" { + if err := openLog(logPath); err != nil { + log.Fatal(err) + } + } + + app := cli.NewApp() + app.Name = "cgutil" + app.Usage = "Test utility for libcontainer cgroups package" + app.Version = "0.1" + + app.Commands = []cli.Command{ + createCommand, + destroyCommand, + statsCommand, + pauseCommand, + resumeCommand, + psCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json new file mode 100644 index 00000000..2d297849 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json @@ -0,0 +1,10 @@ +{ + "name": "luke", + "parent": "darth", + "allow_all_devices": true, + "memory": 1073741824, + "memory_swap": -1, + "cpu_shares": 2048, + "cpu_quota": 500000, + "cpu_period": 250000 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go new file mode 100644 index 00000000..e9c06e1e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -0,0 +1,198 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +var ( + subsystems = map[string]subsystem{ + "devices": &DevicesGroup{}, + "memory": &MemoryGroup{}, + "cpu": &CpuGroup{}, + "cpuset": &CpusetGroup{}, + "cpuacct": &CpuacctGroup{}, + "blkio": &BlkioGroup{}, + "perf_event": &PerfEventGroup{}, + "freezer": &FreezerGroup{}, + } +) + +type subsystem interface { + Set(*data) error + Remove(*data) error + GetStats(string, *cgroups.Stats) error +} + +type data struct { + root string + cgroup string + c *cgroups.Cgroup + pid int +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + d, err := getCgroupData(c, pid) + if err != nil { + return nil, err + } + + for _, sys := range subsystems { + if err := sys.Set(d); err != nil { + d.Cleanup() + return nil, err + } + } + + return d, nil +} + +func Cleanup(c *cgroups.Cgroup) error { + d, err := getCgroupData(c, 0) + if err != nil { + return fmt.Errorf("Could not get Cgroup data %s", err) + } + return d.Cleanup() +} + +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + + d, err := getCgroupData(c, 0) + if err != nil { + return nil, fmt.Errorf("getting CgroupData %s", err) + } + + for sysname, sys := 
range subsystems { + path, err := d.path(sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if err == cgroups.ErrNotFound { + continue + } + + return nil, err + } + + if err := sys.GetStats(path, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Freeze toggles the container's freezer cgroup depending on the state +// provided +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + d, err := getCgroupData(c, 0) + if err != nil { + return err + } + + c.Freezer = state + + freezer := subsystems["freezer"] + + return freezer.Set(d) +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) { + // we can pick any subsystem to find the root + cgroupRoot, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + cgroupRoot = filepath.Dir(cgroupRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return nil, fmt.Errorf("cgroups fs not found") + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + return &data{ + root: cgroupRoot, + cgroup: cgroup, + c: c, + pid: pid, + }, nil +} + +func (raw *data) parent(subsystem string) (string, error) { + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) path(subsystem string) (string, error) { + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + return filepath.Join(parent, raw.cgroup), nil +} + +func (raw *data) join(subsystem string) (string, error) { + path, err := raw.path(subsystem) + if err != nil { + return "", err + } + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + if err := writeFile(path, "cgroup.procs", strconv.Itoa(raw.pid)); err != nil { + return "", err + } + return path, nil +} + +func (raw *data) Cleanup() error { + for _, sys := range subsystems { + sys.Remove(raw) + } + return nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func readFile(dir, file string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(dir, file)) + return string(data), err +} + +func removePath(p string, err error) error { + if err != nil { + return err + } + if p != "" { + return os.RemoveAll(p) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go new file mode 100644 index 00000000..38b87577 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -0,0 +1,141 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" +) + +type BlkioGroup struct { +} + +func (s *BlkioGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("blkio"); err != nil && err != cgroups.ErrNotFound { + return err + } + return nil +} + +func (s *BlkioGroup) Remove(d *data) error { + return removePath(d.path("blkio")) +} + +/* +examples: + + blkio.sectors + 8:0 6792 + + 
blkio.io_service_bytes + 8:0 Read 1282048 + 8:0 Write 2195456 + 8:0 Sync 2195456 + 8:0 Async 1282048 + 8:0 Total 3477504 + Total 3477504 + + blkio.io_serviced + 8:0 Read 124 + 8:0 Write 104 + 8:0 Sync 104 + 8:0 Async 124 + 8:0 Total 228 + Total 228 + + blkio.io_queued + 8:0 Read 0 + 8:0 Write 0 + 8:0 Sync 0 + 8:0 Async 0 + 8:0 Total 0 + Total 0 +*/ + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { + var blkioStats []cgroups.BlkioStatEntry + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return blkioStats, nil + } + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + + v, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + major := v + + v, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + minor := v + + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err = strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return nil, err + } + blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) + } + + return blkioStats, nil +} + +func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { + return err + } + stats.BlkioStats.SectorsRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { + return err + } + stats.BlkioStats.IoQueuedRecursive = blkioStats + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go new file mode 100644 index 00000000..db6f8d52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go @@ -0,0 +1,174 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + sectorsRecursiveContents = `8:0 1024` + serviceBytesRecursiveContents = `8:0 Read 100 +8:0 Write 200 +8:0 Sync 300 +8:0 Async 500 +8:0 Total 500 +Total 500` + servicedRecursiveContents = `8:0 Read 10 +8:0 Write 40 +8:0 Sync 20 +8:0 Async 30 +8:0 Total 50 +Total 50` + queuedRecursiveContents = `8:0 Read 1 +8:0 Write 4 +8:0 Sync 2 +8:0 Async 3 +8:0 Total 5 +Total 5` +) + +var actualStats = *cgroups.NewStats() + +func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { + *blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op}) +} + +func TestBlkioStats(t *testing.T) { + helper := 
NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. + expectedStats := cgroups.BlkioStats{} + appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "") + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total") + + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} + +func TestBlkioStatsNoSectorsFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceBytesFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServicedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoQueuedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + 
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read 100 100", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedFieldType(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read Write", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go new file mode 100644 index 00000000..efac9ed1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go @@ -0,0 +1,72 @@ +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpuGroup struct { +} + +func (s *CpuGroup) Set(d *data) error { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + dir, err := d.join("cpu") + if err != nil { + return err + } + if d.c.CpuShares != 0 { + if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil { + return err + } + } + if d.c.CpuPeriod != 0 { + if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil { + return err + } + } + if d.c.CpuQuota != 0 { + if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil { + return err + } + } + return nil +} + +func (s *CpuGroup) Remove(d *data) error { + return removePath(d.path("cpu")) +} + +func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "nr_periods": + stats.CpuStats.ThrottlingData.Periods = v + + case "nr_throttled": + stats.CpuStats.ThrottlingData.ThrottledPeriods = v + + case "throttled_time": + stats.CpuStats.ThrottlingData.ThrottledTime = v + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go new file mode 100644 index 00000000..017a1f4b --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go @@ -0,0 +1,66 @@ +package fs + +import ( + "fmt" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + const ( + kNrPeriods = 2000 + kNrThrottled = 200 + kThrottledTime = uint64(18446744073709551615) + ) + + cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n", + kNrPeriods, kNrThrottled, kThrottledTime) + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + expectedStats := cgroups.ThrottlingData{ + Periods: kNrPeriods, + ThrottledPeriods: kNrThrottled, + ThrottledTime: kThrottledTime} + + expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &CpuGroup{} + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal("Expected not to fail, but did") + } +} + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go new file mode 100644 index 00000000..7979009c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go @@ -0,0 +1,163 @@ +package fs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/system" +) + +var ( + cpuCount = uint64(runtime.NumCPU()) + clockTicks = uint64(system.GetClockTicks()) +) + +const nanosecondsInSecond = 1000000000 + +type CpuacctGroup struct { +} + +func (s *CpuacctGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("cpuacct"); err != nil && err != cgroups.ErrNotFound { + return err + } + return nil +} + +func (s *CpuacctGroup) Remove(d *data) error { + return removePath(d.path("cpuacct")) +} + +func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { + var ( + err error + startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage, kernelModeUsage, userModeUsage, percentage uint64 + ) + if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil { + return err + } + startCpu = kernelModeUsage + userModeUsage + if startSystem, err = getSystemCpuUsage(); err != nil { + return err + } + startUsageTime := time.Now() + if startUsage, err = getCgroupParamInt(path, "cpuacct.usage"); err != nil { + return err + } + // sample for 100ms + time.Sleep(100 * time.Millisecond) + if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil { + return err + } + lastCpu = kernelModeUsage + userModeUsage + if lastSystem, err = getSystemCpuUsage(); err != nil { + return err + } + usageSampleDuration := time.Since(startUsageTime) + if lastUsage, err = 
getCgroupParamInt(path, "cpuacct.usage"); err != nil { + return err + } + + var ( + deltaProc = lastCpu - startCpu + deltaSystem = lastSystem - startSystem + deltaUsage = lastUsage - startUsage + ) + if deltaSystem > 0.0 { + percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount + } + // NOTE: a percentage over 100% is valid for POSIX because that means the + // processes is using multiple cores + stats.CpuStats.CpuUsage.PercentUsage = percentage + // Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time. + stats.CpuStats.CpuUsage.CurrentUsage = deltaUsage / uint64(usageSampleDuration.Nanoseconds()) + percpuUsage, err := getPercpuUsage(path) + if err != nil { + return err + } + stats.CpuStats.CpuUsage.TotalUsage = lastUsage + stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage + stats.CpuStats.CpuUsage.UsageInKernelmode = (kernelModeUsage * nanosecondsInSecond) / clockTicks + stats.CpuStats.CpuUsage.UsageInUsermode = (userModeUsage * nanosecondsInSecond) / clockTicks + return nil +} + +// TODO(vmarmol): Use cgroups stats. +func getSystemCpuUsage() (uint64, error) { + + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + parts := strings.Fields(sc.Text()) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + + var total uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0.0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + total += v + } + return total, nil + default: + continue + } + } + return 0, fmt.Errorf("invalid stat format") +} + +func getCpuUsage(path string) (uint64, uint64, error) { + kernelModeUsage := uint64(0) + userModeUsage := uint64(0) + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.stat")) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("Failure - %s is expected to have 4 fields", filepath.Join(path, "cpuacct.stat")) + } + if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { + return 0, 0, err + } + if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { + return 0, 0, err + } + + return kernelModeUsage, userModeUsage, nil +} + +func getPercpuUsage(path string) ([]uint64, error) { + percpuUsage := []uint64{} + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) + if err != nil { + return percpuUsage, err + } + for _, value := range strings.Fields(string(data)) { + value, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + percpuUsage = append(percpuUsage, value) + } + return percpuUsage, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go new file mode 100644 index 00000000..9570125f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go @@ -0,0 +1,110 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpusetGroup struct { +} + +func (s *CpusetGroup) Set(d *data) error { + // we don't want to join this cgroup unless it is specified + if d.c.CpusetCpus != "" { + dir, err := d.path("cpuset") + if err != nil { + 
return err + } + if err := s.ensureParent(dir); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(d.pid)); err != nil { + return err + } + if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} + +func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent ensures that the parent directory of current is created +// with the proper cpus and mems files copied from it's parent if the values +// are a file with a new line char +func (s *CpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are 0 +func (s *CpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go new file mode 100644 index 00000000..98d5d2d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go @@ -0,0 +1,34 @@ +package fs + +import "github.com/docker/libcontainer/cgroups" + +type DevicesGroup struct { +} + +func (s *DevicesGroup) Set(d *data) error { + dir, err := d.join("devices") + if err != nil { + return err + } + + if !d.c.AllowAllDevices { + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range d.c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + } + return nil +} + +func (s *DevicesGroup) Remove(d *data) error { + return removePath(d.path("devices")) +} + +func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go new file mode 100644 index 
00000000..db2a41ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go @@ -0,0 +1,50 @@ +package fs + +import ( + "strings" + "time" + + "github.com/docker/libcontainer/cgroups" +) + +type FreezerGroup struct { +} + +func (s *FreezerGroup) Set(d *data) error { + switch d.c.Freezer { + case cgroups.Frozen, cgroups.Thawed: + dir, err := d.path("freezer") + if err != nil { + return err + } + + if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil { + return err + } + + for { + state, err := readFile(dir, "freezer.state") + if err != nil { + return err + } + if strings.TrimSpace(state) == string(d.c.Freezer) { + break + } + time.Sleep(1 * time.Millisecond) + } + default: + if _, err := d.join("freezer"); err != nil && err != cgroups.ErrNotFound { + return err + } + } + + return nil +} + +func (s *FreezerGroup) Remove(d *data) error { + return removePath(d.path("freezer")) +} + +func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go new file mode 100644 index 00000000..c27150d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go @@ -0,0 +1,92 @@ +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type MemoryGroup struct { +} + +func (s *MemoryGroup) Set(d *data) error { + dir, err := d.join("memory") + // only return an error for memory if it was not specified + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + // Only set values if some config was specified. + if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 { + if d.c.Memory != 0 { + if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { + return err + } + } + if d.c.MemoryReservation != 0 { + if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil { + return err + } + } + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. + if d.c.MemorySwap != -1 { + if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.Memory*2, 10)); err != nil { + return err + } + } + } + return nil +} + +func (s *MemoryGroup) Remove(d *data) error { + return removePath(d.path("memory")) +} + +func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { + // Set stats from memory.stat. + statsFile, err := os.Open(filepath.Join(path, "memory.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + stats.MemoryStats.Stats[t] = v + } + + // Set memory usage and max historical usage. 
+ value, err := getCgroupParamInt(path, "memory.usage_in_bytes") + if err != nil { + return err + } + stats.MemoryStats.Usage = value + value, err = getCgroupParamInt(path, "memory.max_usage_in_bytes") + if err != nil { + return err + } + stats.MemoryStats.MaxUsage = value + value, err = getCgroupParamInt(path, "memory.failcnt") + if err != nil { + return err + } + stats.MemoryStats.Failcnt = value + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go new file mode 100644 index 00000000..e92f1daf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go @@ -0,0 +1,127 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + memoryStatContents = `cache 512 +rss 1024` + memoryUsageContents = "2048\n" + memoryMaxUsageContents = "4096\n" + memoryFailcnt = "100\n" +) + +func TestMemoryStats(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + "memory.failcnt": memoryFailcnt, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}} + expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats) +} + +func TestMemoryStatsNoStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } +} + +func TestMemoryStatsNoUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": "rss rss", + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": "bad", + 
"memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": "bad", + }) + + memory := &MemoryGroup{} + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go new file mode 100644 index 00000000..d92063ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go @@ -0,0 +1,82 @@ +// +build linux + +package fs + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/cgroups" +) + +// NotifyOnOOM sends signals on the returned channel when the cgroup reaches +// its memory limit. The channel is closed when the cgroup is removed. +func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + return notifyOnOOM(d) +} + +func notifyOnOOM(d *data) (<-chan struct{}, error) { + dir, err := d.path("memory") + if err != nil { + return nil, err + } + + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) + if err != nil { + eventfd.Close() + return nil, err + } + + var ( + eventControlPath = filepath.Join(dir, "cgroup.event_control") + data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) + ) + + if err := writeFile(dir, "cgroup.event_control", data); err != nil { + eventfd.Close() + oomControl.Close() + return nil, err + } + + ch := make(chan struct{}) + + go func() { + defer func() { + close(ch) + eventfd.Close() + oomControl.Close() + }() + + buf := make([]byte, 8) + + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. 
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + + ch <- struct{}{} + } + }() + + return ch, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go new file mode 100644 index 00000000..a11880cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go @@ -0,0 +1,86 @@ +// +build linux + +package fs + +import ( + "encoding/binary" + "fmt" + "syscall" + "testing" + "time" +) + +func TestNotifyOnOOM(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + + helper.writeFileContents(map[string]string{ + "memory.oom_control": "", + "cgroup.event_control": "", + }) + + var eventFd, oomControlFd int + + ooms, err := notifyOnOOM(helper.CgroupData) + if err != nil { + t.Fatal("expected no error, got:", err) + } + + memoryPath, _ := helper.CgroupData.path("memory") + data, err := readFile(memoryPath, "cgroup.event_control") + if err != nil { + t.Fatal("couldn't read event control file:", err) + } + + if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil { + t.Fatalf("invalid control data %q: %s", data, err) + } + + // re-open the eventfd + efd, err := syscall.Dup(eventFd) + if err != nil { + t.Fatal("unable to reopen eventfd:", err) + } + defer syscall.Close(efd) + + if err != nil { + t.Fatal("unable to dup event fd:", err) + } + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, 1) + + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + select { + case <-ooms: + case <-time.After(100 * time.Millisecond): + t.Fatal("no notification on oom channel after 100ms") + } + + // simulate what happens when a cgroup is destroyed by cleaning up and then + // writing to the eventfd. 
+ helper.cleanup() + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + // give things a moment to shut down + select { + case _, ok := <-ooms: + if ok { + t.Fatal("expected no oom to be triggered") + } + case <-time.After(100 * time.Millisecond): + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected oom control to be closed") + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected event fd to be closed") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go new file mode 100644 index 00000000..5f45678f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go @@ -0,0 +1,24 @@ +package fs + +import ( + "github.com/docker/libcontainer/cgroups" +) + +type PerfEventGroup struct { +} + +func (s *PerfEventGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("perf_event"); err != nil && err != cgroups.ErrNotFound { + return err + } + return nil +} + +func (s *PerfEventGroup) Remove(d *data) error { + return removePath(d.path("perf_event")) +} + +func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go new file mode 100644 index 00000000..7e7da754 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go @@ -0,0 +1,73 @@ +package fs + +import ( + "fmt" + "log" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error { + if len(expected) != len(actual) { + return fmt.Errorf("blkioStatEntries length do not match") + } + for i, expValue := range expected { + actValue := actual[i] + if expValue != actValue { + return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue) + } + } + return nil +} + +func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { + if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil { + log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil { + log.Printf("blkio IoServicedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil { + log.Printf("blkio IoQueuedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil { + log.Printf("blkio SectorsRecursive do not match - %s\n", err) + t.Fail() + } +} + +func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { + if expected != actual { + log.Printf("Expected throttling data %v but found %v\n", expected, actual) + t.Fail() + } +} + +func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) { + if expected.Usage != actual.Usage { + log.Printf("Expected memory usage %d but 
found %d\n", expected.Usage, actual.Usage) + t.Fail() + } + if expected.MaxUsage != actual.MaxUsage { + log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage) + t.Fail() + } + for key, expValue := range expected.Stats { + actValue, ok := actual.Stats[key] + if !ok { + log.Printf("Expected memory stat key %s not found\n", key) + t.Fail() + } + if expValue != actValue { + log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue) + t.Fail() + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/test_util.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/test_util.go new file mode 100644 index 00000000..548870a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/test_util.go @@ -0,0 +1,60 @@ +/* +Utility for testing cgroup operations. + +Creates a mock of the cgroup filesystem for the duration of the test. +*/ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +type cgroupTestUtil struct { + // data to use in tests. + CgroupData *data + + // Path to the mock cgroup directory. + CgroupPath string + + // Temporary directory to store mock cgroup filesystem. + tempDir string + t *testing.T +} + +// Creates a new test util for the specified subsystem +func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil { + d := &data{} + tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem)) + if err != nil { + t.Fatal(err) + } + d.root = tempDir + testCgroupPath, err := d.path(subsystem) + if err != nil { + t.Fatal(err) + } + + // Ensure the full mock cgroup path exists. + err = os.MkdirAll(testCgroupPath, 0755) + if err != nil { + t.Fatal(err) + } + return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t} +} + +func (c *cgroupTestUtil) cleanup() { + os.RemoveAll(c.tempDir) +} + +// Write the specified contents on the mock of the specified cgroup files. +func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { + for file, contents := range fileContents { + err := writeFile(c.CgroupPath, file, contents) + if err != nil { + c.t.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go new file mode 100644 index 00000000..f65622a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go @@ -0,0 +1,40 @@ +package fs + +import ( + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +var ( + ErrNotSupportStat = errors.New("stats are not supported for subsystem") + ErrNotValidFormat = errors.New("line is not a valid key value format") +) + +// Parses a cgroup param and returns as name, value +// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 +func getCgroupParamKeyValue(t string) (string, uint64, error) { + parts := strings.Fields(t) + switch len(parts) { + case 2: + value, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + return parts[0], value, nil + default: + return "", 0, ErrNotValidFormat + } +} + +// Gets a single int64 value from the specified cgroup file. 
+func getCgroupParamInt(cgroupPath, cgroupFile string) (uint64, error) { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go new file mode 100644 index 00000000..63d743f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go @@ -0,0 +1,68 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const ( + cgroupFile = "cgroup.file" + floatValue = 2048.0 + floatString = "2048" +) + +func TestGetCgroupParamsInt(t *testing.T) { + // Setup tempdir. + tempDir, err := ioutil.TempDir("", "cgroup_utils_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, cgroupFile) + + // Success. + err = ioutil.WriteFile(tempFile, []byte(floatString), 0755) + if err != nil { + t.Fatal(err) + } + value, err := getCgroupParamInt(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with new line. + err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamInt(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Not a float. + err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamInt(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } + + // Unknown file. + err = os.Remove(tempFile) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamInt(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go new file mode 100644 index 00000000..49913bce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go @@ -0,0 +1,67 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +type CpuUsage struct { + // percentage of available CPUs currently being used. + PercentUsage uint64 `json:"percent_usage,omitempty"` + // nanoseconds of cpu time consumed over the last 100 ms. + CurrentUsage uint64 `json:"current_usage,omitempty"` + // total nanoseconds of cpu time consumed + TotalUsage uint64 `json:"total_usage,omitempty"` + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. Units: nanoseconds. 
+ UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throlling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt"` +} + +type BlkioStatEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type BlkioStats struct { + // number of bytes tranferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` +} + +func NewStats() *Stats { + memoryStats := MemoryStats{Stats: make(map[string]uint64)} + return &Stats{MemoryStats: memoryStats} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go new file mode 100644 index 00000000..68559109 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -0,0 +1,29 @@ +// +build !linux + +package systemd + +import ( + "fmt" + + "github.com/docker/libcontainer/cgroups" +) + +func UseSystemd() bool { + return false +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + return fmt.Errorf("Systemd not supported") +} + +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + return nil, fmt.Errorf("Systemd not supported") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go new file mode 100644 index 00000000..01e5bf49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -0,0 +1,439 @@ +// +build linux + +package systemd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + systemd1 "github.com/coreos/go-systemd/dbus" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/godbus/dbus" +) + +type systemdCgroup struct { + cleanupDirs []string +} + +type subsystem interface { + GetStats(string, *cgroups.Stats) error +} + +var ( + connLock sync.Mutex + theConn *systemd1.Conn + hasStartTransientUnit bool + subsystems = map[string]subsystem{ + "devices": &fs.DevicesGroup{}, + "memory": &fs.MemoryGroup{}, + 
"cpu": &fs.CpuGroup{}, + "cpuset": &fs.CpusetGroup{}, + "cpuacct": &fs.CpuacctGroup{}, + "blkio": &fs.BlkioGroup{}, + "perf_event": &fs.PerfEventGroup{}, + "freezer": &fs.FreezerGroup{}, + } +) + +func UseSystemd() bool { + if !systemd.SdBooted() { + return false + } + + connLock.Lock() + defer connLock.Unlock() + + if theConn == nil { + var err error + theConn, err = systemd1.New() + if err != nil { + return false + } + + // Assume we have StartTransientUnit + hasStartTransientUnit = true + + // But if we get UnknownMethod error we don't + if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil { + if dbusError, ok := err.(dbus.Error); ok { + if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { + hasStartTransientUnit = false + } + } + } + } + return hasStartTransientUnit +} + +func getIfaceForUnit(unitName string) string { + if strings.HasSuffix(unitName, ".scope") { + return "Scope" + } + if strings.HasSuffix(unitName, ".service") { + return "Service" + } + return "Unit" +} + +type cgroupArg struct { + File string + Value string +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + var ( + unitName = getUnitName(c) + slice = "system.slice" + properties []systemd1.Property + cpuArgs []cgroupArg + cpusetArgs []cgroupArg + memoryArgs []cgroupArg + res systemdCgroup + ) + + // First set up things not supported by systemd + + // -1 disables memorySwap + if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) { + memorySwap := c.MemorySwap + + if memorySwap == 0 { + // By default, MemorySwap is set to twice the size of RAM. + memorySwap = c.Memory * 2 + } + + memoryArgs = append(memoryArgs, cgroupArg{"memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10)}) + } + + if c.CpusetCpus != "" { + cpusetArgs = append(cpusetArgs, cgroupArg{"cpuset.cpus", c.CpusetCpus}) + } + + if c.Slice != "" { + slice = c.Slice + } + + properties = append(properties, + systemd1.Property{"Slice", dbus.MakeVariant(slice)}, + systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)}, + systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}, + ) + + // Always enable accounting, this gets us the same behaviour as the fs implementation, + // plus the kernel has some problems with joining the memory cgroup at a later time. 
+ properties = append(properties, + systemd1.Property{"MemoryAccounting", dbus.MakeVariant(true)}, + systemd1.Property{"CPUAccounting", dbus.MakeVariant(true)}, + systemd1.Property{"BlockIOAccounting", dbus.MakeVariant(true)}) + + if c.Memory != 0 { + properties = append(properties, + systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) + } + // TODO: MemoryReservation and MemorySwap not available in systemd + + if c.CpuShares != 0 { + properties = append(properties, + systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))}) + } + + if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { + return nil, err + } + + // To work around the lack of /dev/pts/* support above we need to manually add these + // so, ask systemd for the cgroup used + props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName)) + if err != nil { + return nil, err + } + + cgroup := props["ControlGroup"].(string) + + if !c.AllowAllDevices { + // Atm we can't use the systemd device support because of two missing things: + // * Support for wildcards to allow mknod on any device + // * Support for wildcards to allow /dev/pts support + // + // The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is + // in wide use. When both these are availalable we will be able to switch, but need to keep the old + // implementation for backwards compat. + // + // Note: we can't use systemd to set up the initial limits, and then change the cgroup + // because systemd will re-write the device settings if it needs to re-apply the cgroup context. + // This happens at least for v208 when any sibling unit is started. + + mountpoint, err := cgroups.FindCgroupMountpoint("devices") + if err != nil { + return nil, err + } + + initPath, err := cgroups.GetInitCgroupDir("devices") + if err != nil { + return nil, err + } + + dir := filepath.Join(mountpoint, initPath, c.Parent, c.Name) + + res.cleanupDirs = append(res.cleanupDirs, dir) + + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(dir, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return nil, err + } + + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return nil, err + } + + for _, dev := range c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return nil, err + } + } + } + + if len(cpuArgs) != 0 { + mountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + + path := filepath.Join(mountpoint, cgroup) + + for _, arg := range cpuArgs { + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + } + + if len(memoryArgs) != 0 { + mountpoint, err := cgroups.FindCgroupMountpoint("memory") + if err != nil { + return nil, err + } + + path := filepath.Join(mountpoint, cgroup) + + for _, arg := range memoryArgs { + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + } + + // we need to manually join the freezer cgroup in systemd because it does not currently support it + // via the dbus api + freezerPath, err := joinFreezer(c, pid) + if err != nil { + return nil, err + } + res.cleanupDirs = append(res.cleanupDirs, freezerPath) + + if len(cpusetArgs) != 0 { + // systemd does not atm set up the cpuset controller, so we must manually + // join 
it. Additionally that is a very finicky controller where each + // level must have a full setup as the default for a new directory is "no cpus", + // so we avoid using any hierarchies here, creating a toplevel directory. + mountpoint, err := cgroups.FindCgroupMountpoint("cpuset") + if err != nil { + return nil, err + } + + initPath, err := cgroups.GetInitCgroupDir("cpuset") + if err != nil { + return nil, err + } + + var ( + foundCpus bool + foundMems bool + + rootPath = filepath.Join(mountpoint, initPath) + path = filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name) + ) + + res.cleanupDirs = append(res.cleanupDirs, path) + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + for _, arg := range cpusetArgs { + if arg.File == "cpuset.cpus" { + foundCpus = true + } + if arg.File == "cpuset.mems" { + foundMems = true + } + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + + // These are required, if not specified inherit from parent + if !foundCpus { + s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.cpus")) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cpuset.cpus"), s, 0700); err != nil { + return nil, err + } + } + + // These are required, if not specified inherit from parent + if !foundMems { + s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.mems")) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cpuset.mems"), s, 0700); err != nil { + return nil, err + } + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return nil, err + } + } + + return &res, nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func (c *systemdCgroup) Cleanup() error { + // systemd cleans up, we don't need to do much + + for _, path := range c.cleanupDirs { + os.RemoveAll(path) + } + + return nil +} + +func joinFreezer(c *cgroups.Cgroup, pid int) (string, error) { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return "", err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return "", err + } + + return path, nil +} + +func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) { + mountpoint, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return "", err + } + + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + + slice := "system.slice" + if c.Slice != "" { + slice = c.Slice + } + + return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil { + return err + } + for { + state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state")) + if err != nil { + return err + } + if string(state) == string(bytes.TrimSpace(state_)) { + break + } + time.Sleep(1 * time.Millisecond) + } + return nil +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + unitName := getUnitName(c) + + mountpoint, err := 
cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + + props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName)) + if err != nil { + return nil, err + } + cgroup := props["ControlGroup"].(string) + + return cgroups.ReadProcsFile(filepath.Join(mountpoint, cgroup)) +} + +func getUnitName(c *cgroups.Cgroup) string { + return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) +} + +/* + * This would be nicer to get from the systemd API when accounting + * is enabled, but sadly there is no way to do that yet. + * The lack of this functionality in the API & the approach taken + * is guided by + * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation. + */ +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + + for sysname, sys := range subsystems { + subsystemPath, err := getSubsystemPath(c, sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if err == cgroups.ErrNotFound { + continue + } + + return nil, err + } + + if err := sys.GetStats(subsystemPath, stats); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go new file mode 100644 index 00000000..ce5c4f33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go @@ -0,0 +1,168 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/mount" +) + +// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt +func FindCgroupMountpoint(subsystem string) (string, error) { + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if opt == subsystem { + return mount.Mountpoint, nil + } + } + } + } + return "", ErrNotFound +} + +type Mount struct { + Mountpoint string + Subsystems []string +} + +func (m Mount) GetThisCgroupDir() (string, error) { + if len(m.Subsystems) == 0 { + return "", fmt.Errorf("no subsystem for mount") + } + + return GetThisCgroupDir(m.Subsystems[0]) +} + +func GetCgroupMounts() ([]Mount, error) { + mounts, err := mount.GetMounts() + if err != nil { + return nil, err + } + + all, err := GetAllSubsystems() + if err != nil { + return nil, err + } + + allMap := make(map[string]bool) + for _, s := range all { + allMap[s] = true + } + + res := []Mount{} + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + m := Mount{Mountpoint: mount.Mountpoint} + + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if strings.HasPrefix(opt, "name=") { + m.Subsystems = append(m.Subsystems, opt) + } + if allMap[opt] { + m.Subsystems = append(m.Subsystems, opt) + } + } + res = append(res, m) + } + } + return res, nil +} + +// Returns all the cgroup subsystems supported by the kernel +func GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + return 
subsystems, nil +} + +// Returns the relative path to the cgroup docker is running in. +func GetThisCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return parseCgroupFile(subsystem, f) +} + +func GetInitCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/1/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return parseCgroupFile(subsystem, f) +} + +func ReadProcsFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, "cgroup.procs")) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +func parseCgroupFile(subsystem string, r io.Reader) (string, error) { + s := bufio.NewScanner(r) + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + text := s.Text() + parts := strings.Split(text, ":") + for _, subs := range strings.Split(parts[1], ",") { + if subs == subsystem { + return parts[2], nil + } + } + } + return "", ErrNotFound +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/config.go b/Godeps/_workspace/src/github.com/docker/libcontainer/config.go new file mode 100644 index 00000000..8fe95c24 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/config.go @@ -0,0 +1,86 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/network" +) + +type MountConfig mount.MountConfig + +type Network network.Network + +// Config defines configuration options for executing a process inside a contained environment. +type Config struct { + // Mount specific options. 
+ MountConfig *MountConfig `json:"mount_config,omitempty"` + + // Hostname optionally sets the container's hostname if provided + Hostname string `json:"hostname,omitempty"` + + // User will set the uid and gid of the executing process running inside the container + User string `json:"user,omitempty"` + + // WorkingDir will change the process's current working directory inside the container's rootfs + WorkingDir string `json:"working_dir,omitempty"` + + // Env will populate the process's environment with the provided values + // Any values from the parent process will be cleared before the values + // provided in Env are provided to the process + Env []string `json:"environment,omitempty"` + + // Tty when true will allocate a pty slave on the host for access by the container's process + // and ensure that it is mounted inside the container's rootfs + Tty bool `json:"tty,omitempty"` + + // Namespaces specifies the container's namespaces that it should set up when cloning the init process + // If a namespace is not provided that namespace is shared from the container's parent process + Namespaces map[string]bool `json:"namespaces,omitempty"` + + // Capabilities specify the capabilities to keep when executing the process inside the container + // All capabilities not specified will be dropped from the process's capability mask + Capabilities []string `json:"capabilities,omitempty"` + + // Networks specifies the container's network setup to be created + Networks []*Network `json:"networks,omitempty"` + + // Routes can be specified to create entries in the route table as the container is started + Routes []*Route `json:"routes,omitempty"` + + // Cgroups specifies specific cgroup settings for the various subsystems that the container is + // placed into to limit the resources the container has available + Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` + + // AppArmorProfile specifies the profile to apply to the process running in the container and is + // changed at the time the process is execed + AppArmorProfile string `json:"apparmor_profile,omitempty"` + + // ProcessLabel specifies the label to apply to the process running in the container. It is + // commonly used by selinux + ProcessLabel string `json:"process_label,omitempty"` + + // RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and + // /proc/bus + RestrictSys bool `json:"restrict_sys,omitempty"` +} + +// Routes can be specified to create entries in the route table as the container is started +// +// All of destination, source, and gateway should be either IPv4 or IPv6. +// One of the three options must be present, and omitted entries will use their +// IP family default for the route table. For IPv4, for example, setting the +// gateway to 1.2.3.4 and the interface to eth0 will set up a standard +// destination of 0.0.0.0 (or *) when viewed in the route table. +type Route struct { + // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 + Destination string `json:"destination,omitempty"` + + // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 + Source string `json:"source,omitempty"` + + // Sets the gateway.
Accepts IPv4 and IPv6 + Gateway string `json:"gateway,omitempty"` + + // The device to set this route up for, for example: eth0 + InterfaceName string `json:"interface_name,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go new file mode 100644 index 00000000..59812811 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go @@ -0,0 +1,160 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/docker/libcontainer/devices" +) + +// Checks whether the expected capability is specified in the capabilities. +func contains(expected string, values []string) bool { + for _, v := range values { + if v == expected { + return true + } + } + return false +} + +func containsDevice(expected *devices.Device, values []*devices.Device) bool { + for _, d := range values { + if d.Path == expected.Path && + d.CgroupPermissions == expected.CgroupPermissions && + d.FileMode == expected.FileMode && + d.MajorNumber == expected.MajorNumber && + d.MinorNumber == expected.MinorNumber && + d.Type == expected.Type { + return true + } + } + return false +} + +func loadConfig(name string) (*Config, error) { + f, err := os.Open(filepath.Join("sample_configs", name)) + if err != nil { + return nil, err + } + defer f.Close() + + var container *Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func TestConfigJsonFormat(t *testing.T) { + container, err := loadConfig("attach_to_bridge.json") + if err != nil { + t.Fatal(err) + } + + if container.Hostname != "koye" { + t.Log("hostname is not set") + t.Fail() + } + + if !container.Tty { + t.Log("tty should be set to true") + t.Fail() + } + + if !container.Namespaces["NEWNET"] { + t.Log("namespaces should contain NEWNET") + t.Fail() + } + + if container.Namespaces["NEWUSER"] { + t.Log("namespaces should not contain NEWUSER") + t.Fail() + } + + if contains("SYS_ADMIN", container.Capabilities) { + t.Log("SYS_ADMIN should not be enabled in capabilities mask") + t.Fail() + } + + if !contains("MKNOD", container.Capabilities) { + t.Log("MKNOD should be enabled in capabilities mask") + t.Fail() + } + + if !contains("SYS_CHROOT", container.Capabilities) { + t.Log("capabilities mask should contain SYS_CHROOT") + t.Fail() + } + + for _, n := range container.Networks { + if n.Type == "veth" { + if n.Bridge != "docker0" { + t.Logf("veth bridge should be docker0 but received %q", n.Bridge) + t.Fail() + } + + if n.Address != "172.17.0.101/16" { + t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address) + t.Fail() + } + + if n.VethPrefix != "veth" { + t.Logf("veth prefix should be veth but received %q", n.VethPrefix) + t.Fail() + } + + if n.Gateway != "172.17.42.1" { + t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway) + t.Fail() + } + + if n.Mtu != 1500 { + t.Logf("veth mtu should be 1500 but received %d", n.Mtu) + t.Fail() + } + + break + } + } + + for _, d := range devices.DefaultSimpleDevices { + if !containsDevice(d, container.MountConfig.DeviceNodes) { + t.Logf("expected device configuration for %s", d.Path) + t.Fail() + } + } + + if !container.RestrictSys { + t.Log("expected restrict sys to be true") + t.Fail() + } +} + +func TestApparmorProfile(t *testing.T) { + container, err := loadConfig("apparmor.json") + if err != nil { + t.Fatal(err) + } + + if container.AppArmorProfile != 
"docker-default" { + t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile) + } +} + +func TestSelinuxLabels(t *testing.T) { + container, err := loadConfig("selinux.json") + if err != nil { + t.Fatal(err) + } + label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475" + + if container.ProcessLabel != label { + t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel) + } + if container.MountConfig.MountLabel != label { + t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go b/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go new file mode 100644 index 00000000..c0d1fb04 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go @@ -0,0 +1,128 @@ +// +build linux + +package console + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/docker/libcontainer/label" +) + +// Setup initializes the proper /dev/console inside the rootfs path +func Setup(rootfs, consolePath, mountLabel string) error { + oldMask := syscall.Umask(0000) + defer syscall.Umask(oldMask) + + if err := os.Chmod(consolePath, 0600); err != nil { + return err + } + + if err := os.Chown(consolePath, 0, 0); err != nil { + return err + } + + if err := label.SetFileLabel(consolePath, mountLabel); err != nil { + return fmt.Errorf("set file label %s %s", consolePath, err) + } + + dest := filepath.Join(rootfs, "dev/console") + + f, err := os.Create(dest) + if err != nil && !os.IsExist(err) { + return fmt.Errorf("create %s %s", dest, err) + } + + if f != nil { + f.Close() + } + + if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) + } + + return nil +} + +func OpenAndDup(consolePath string) error { + slave, err := OpenTerminal(consolePath, syscall.O_RDWR) + if err != nil { + return fmt.Errorf("open terminal %s", err) + } + + if err := syscall.Dup2(int(slave.Fd()), 0); err != nil { + return err + } + + if err := syscall.Dup2(int(slave.Fd()), 1); err != nil { + return err + } + + return syscall.Dup2(int(slave.Fd()), 2) +} + +// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// Unlockpt should be called before opening the slave side of a pseudoterminal. +func Unlockpt(f *os.File) error { + var u int + + return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// Ptsname retrieves the name of the first available pts for the given master. +func Ptsname(f *os.File) (string, error) { + var n int + + if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the +// pts name for use as the pty slave inside the container +func CreateMasterAndConsole() (*os.File, string, error) { + master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + + console, err := Ptsname(master) + if err != nil { + return nil, "", err + } + + if err := Unlockpt(master); err != nil { + return nil, "", err + } + + return master, console, nil +} + +// OpenPtmx opens /dev/ptmx, i.e. the PTY master. 
+func OpenPtmx() (*os.File, error) { + // O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all. + return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) +} + +// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC +// used to open the pty slave inside the container namespace +func OpenTerminal(name string, flag int) (*os.File, error) { + r, e := syscall.Open(name, flag, 0) + if e != nil { + return nil, &os.PathError{"open", name, e} + } + return os.NewFile(uintptr(r), name), nil +} + +func Ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/container.go b/Godeps/_workspace/src/github.com/docker/libcontainer/container.go new file mode 100644 index 00000000..c4d372ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/container.go @@ -0,0 +1,71 @@ +/* +NOTE: The API is in flux and mainly not implemented. Proceed with caution until further notice. +*/ +package libcontainer + +// A libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. +type Container interface { + // Returns the path to the container which contains the state + Path() string + + // Returns the current run state of the container. + // + // Errors: container no longer exists, + // system error. + RunState() (*RunState, error) + + // Returns the current config of the container. + Config() *Config + + // Start a process inside the container. Returns the PID of the new process (in the caller process's namespace) and a channel that will return the exit status of the process whenever it dies. + // + // Errors: container no longer exists, + // config is invalid, + // container is paused, + // system error. + Start(*ProcessConfig) (pid int, exitChan chan int, err error) + + // Destroys the container after killing all running processes. + // + // Any event registrations are removed before the container is destroyed. + // No error is returned if the container is already destroyed. + // + // Errors: system error. + Destroy() error + + // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. + // + // Errors: container no longer exists, + // system error. + // + // Some of the returned PIDs may no longer refer to processes in the Container, unless + // the Container state is PAUSED in which case every PID in the slice is valid. + Processes() ([]int, error) + + // Returns statistics for the container. + // + // Errors: container no longer exists, + // system error. + Stats() (*ContainerStats, error) + + // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses + // the execution of any user processes. Asynchronously, when the container finished being paused the + // state is changed to PAUSED. + // If the Container state is PAUSED, do nothing. + // + // Errors: container no longer exists, + // system error. + Pause() error + + // If the Container state is PAUSED, resumes the execution of any user processes in the + // Container before setting the Container state to RUNNING. + // If the Container state is RUNNING, do nothing. + // + // Errors: container no longer exists, + // system error. 
+ Resume() error +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go new file mode 100644 index 00000000..e0ad0b08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go @@ -0,0 +1,159 @@ +package devices + +var ( + // These are devices that are to be both allowed and created. + + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + MajorNumber: 1, + MinorNumber: 3, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + MajorNumber: 1, + MinorNumber: 5, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + MajorNumber: 1, + MinorNumber: 7, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + MajorNumber: 5, + MinorNumber: 0, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom,/dev/random + { + Path: "/dev/urandom", + Type: 'c', + MajorNumber: 1, + MinorNumber: 9, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + MajorNumber: 1, + MinorNumber: 8, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + } + + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + { + Type: 'b', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + MajorNumber: 5, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty0", + Type: 'c', + MajorNumber: 4, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty1", + Type: 'c', + MajorNumber: 4, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + MajorNumber: 136, + MinorNumber: Wildcard, + CgroupPermissions: "rwm", + }, + { + Path: "", + Type: 'c', + MajorNumber: 5, + MinorNumber: 2, + CgroupPermissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 200, + CgroupPermissions: "rwm", + }, + + /*// fuse + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + + // rtc + { + Path: "", + Type: 'c', + MajorNumber: 254, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + */ + }, DefaultSimpleDevices...) + + DefaultAutoCreatedDevices = append([]*Device{ + { + // /dev/fuse is created but not allowed. + // This is to allow java to work. Because java + // Insists on there being a /dev/fuse + // https://github.com/docker/docker/issues/514 + // https://github.com/docker/docker/issues/2393 + // + Path: "/dev/fuse", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + }, DefaultSimpleDevices...) 
+) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go new file mode 100644 index 00000000..727a5bdf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go @@ -0,0 +1,119 @@ +package devices + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +const ( + Wildcard = -1 +) + +var ( + ErrNotADeviceNode = errors.New("not a device node") +) + +type Device struct { + Type rune `json:"type,omitempty"` + Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards + MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards. + MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards. + CgroupPermissions string `json:"cgroup_permissions,omitempty"` // Typically just "rwm" + FileMode os.FileMode `json:"file_mode,omitempty"` // The permission bits of the file's mode +} + +func GetDeviceNumberString(deviceNumber int64) string { + if deviceNumber == Wildcard { + return "*" + } else { + return fmt.Sprintf("%d", deviceNumber) + } +} + +func (device *Device) GetCgroupAllowString() string { + return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions) +} + +// Given the path to a device and it's cgroup_permissions(which cannot be easilly queried) look up the information about a linux device and return that information as a Device struct. +func GetDevice(path, cgroupPermissions string) (*Device, error) { + fileInfo, err := os.Lstat(path) + if err != nil { + return nil, err + } + + var ( + devType rune + mode = fileInfo.Mode() + fileModePermissionBits = os.FileMode.Perm(mode) + ) + + switch { + case mode&os.ModeDevice == 0: + return nil, ErrNotADeviceNode + case mode&os.ModeCharDevice != 0: + fileModePermissionBits |= syscall.S_IFCHR + devType = 'c' + default: + fileModePermissionBits |= syscall.S_IFBLK + devType = 'b' + } + + stat_t, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return nil, fmt.Errorf("cannot determine the device number for device %s", path) + } + devNumber := int(stat_t.Rdev) + + return &Device{ + Type: devType, + Path: path, + MajorNumber: Major(devNumber), + MinorNumber: Minor(devNumber), + CgroupPermissions: cgroupPermissions, + FileMode: fileModePermissionBits, + }, nil +} + +func GetHostDeviceNodes() ([]*Device, error) { + return getDeviceNodes("/dev") +} + +func getDeviceNodes(path string) ([]*Device, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + out := []*Device{} + for _, f := range files { + if f.IsDir() { + switch f.Name() { + case "pts", "shm", "fd": + continue + default: + sub, err := getDeviceNodes(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) 
+				continue
+			}
+		}
+
+		device, err := GetDevice(filepath.Join(path, f.Name()), "rwm")
+		if err != nil {
+			if err == ErrNotADeviceNode {
+				continue
+			}
+			return nil, err
+		}
+		out = append(out, device)
+	}
+
+	return out, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go
new file mode 100644
index 00000000..3aae380b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go
@@ -0,0 +1,26 @@
+package devices
+
+/*
+
+This code provides support for manipulating Linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device), MINOR(dev_t device) and MKDEV(int major, int minor) macros defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally by the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
+
+*/
+
+func Major(devNumber int) int64 {
+	return int64((devNumber >> 8) & 0xfff)
+}
+
+func Minor(devNumber int) int64 {
+	return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
+}
+
+func Mkdev(majorNumber int64, minorNumber int64) int {
+	return int((majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go b/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go
new file mode 100644
index 00000000..9161ff05
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go
@@ -0,0 +1,25 @@
+package libcontainer
+
+type Factory interface {
+	// Creates a new container in the given path, generates a unique ID for it, and
+	// starts the initial process inside the container.
+	//
+	// Returns the new container with a running process.
+	//
+	// Errors:
+	// Path already exists
+	// Config or initialConfig is invalid
+	// System error
+	//
+	// On error, any partially created container parts are cleaned up (the operation is atomic).
+	Create(path string, config *Config) (Container, error)
+
+	// Load takes the path for an existing container and reconstructs the container
+	// from the state.
+ // + // Errors: + // Path does not exist + // Container is stopped + // System error + Load(path string) (Container, error) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go new file mode 100644 index 00000000..5c8228cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go @@ -0,0 +1,34 @@ +// +build !selinux !linux + +package label + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, mountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func Relabel(path string, fileLabel string, relabel string) error { + return nil +} + +func GetPidCon(pid int) (string, error) { + return "", nil +} + +func Init() { +} + +func ReserveLabel(label string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go new file mode 100644 index 00000000..aa502a3d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go @@ -0,0 +1,100 @@ +// +build selinux,linux + +package label + +import ( + "fmt" + "strings" + + "github.com/docker/libcontainer/selinux" +) + +func GenLabels(options string) (string, string, error) { + if !selinux.SelinuxEnabled() { + return "", "", nil + } + var err error + processLabel, mountLabel := selinux.GetLxcContexts() + if processLabel != "" { + var ( + s = strings.Fields(options) + l = len(s) + ) + if l > 0 { + pcon := selinux.NewContext(processLabel) + for i := 0; i < l; i++ { + o := strings.Split(s[i], "=") + pcon[o[0]] = o[1] + } + processLabel = pcon.Get() + mountLabel, err = selinux.CopyLevel(processLabel, mountLabel) + } + } + return processLabel, mountLabel, err +} + +func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("context=%q", mountLabel) + default: + src = fmt.Sprintf("%s,context=%q", src, mountLabel) + } + } + return src +} + +func SetProcessLabel(processLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setexeccon(processLabel) + } + return nil +} + +func GetProcessLabel() (string, error) { + if selinux.SelinuxEnabled() { + return selinux.Getexeccon() + } + return "", nil +} + +func SetFileLabel(path string, fileLabel string) error { + if selinux.SelinuxEnabled() && fileLabel != "" { + return selinux.Setfilecon(path, fileLabel) + } + return nil +} + +// Change the label of path to the filelabel string. If the relabel string +// is "z", relabel will change the MCS label to s0. This will allow all +// containers to share the content. If the relabel string is a "Z" then +// the MCS label should continue to be used. SELinux will use this field +// to make sure the content can not be shared by other containes. 
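Before the Relabel implementation below, a small editorial illustration of FormatMountLabel defined above; it is not part of the patch and simply mirrors the vendored function to show the mount-data string it produces (formatMountLabel and package sketch are invented names).

package sketch

import "fmt"

// formatMountLabel appends an SELinux context= option to existing mount data,
// mirroring label.FormatMountLabel above.
func formatMountLabel(src, mountLabel string) string {
	if mountLabel == "" {
		return src
	}
	if src == "" {
		return fmt.Sprintf("context=%q", mountLabel)
	}
	return fmt.Sprintf("%s,context=%q", src, mountLabel)
}

// Example:
//   formatMountLabel("mode=1777,size=65536k", "system_u:object_r:svirt_sandbox_file_t:s0:c1,c2")
// returns:
//   mode=1777,size=65536k,context="system_u:object_r:svirt_sandbox_file_t:s0:c1,c2"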
+func Relabel(path string, fileLabel string, relabel string) error { + if fileLabel == "" { + return nil + } + if relabel == "z" { + c := selinux.NewContext(fileLabel) + c["level"] = "s0" + fileLabel = c.Get() + } + return selinux.Chcon(path, fileLabel, true) +} + +func GetPidCon(pid int) (string, error) { + if !selinux.SelinuxEnabled() { + return "", nil + } + return selinux.Getpidcon(pid) +} + +func Init() { + selinux.SelinuxEnabled() +} + +func ReserveLabel(label string) error { + selinux.ReserveLabel(label) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go new file mode 100644 index 00000000..7edea499 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go @@ -0,0 +1,244 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/mount/nodes" +) + +// default mount point flags +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +type mount struct { + source string + path string + device string + flags int + data string +} + +// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a +// new mount namespace. +func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error { + var ( + err error + flag = syscall.MS_PRIVATE + ) + if mountConfig.NoPivotRoot { + flag = syscall.MS_SLAVE + } + if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { + return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err) + } + if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("mouting %s as bind %s", rootfs, err) + } + if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil { + return fmt.Errorf("mount system %s", err) + } + if err := setupBindmounts(rootfs, mountConfig); err != nil { + return fmt.Errorf("bind mounts %s", err) + } + if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil { + return fmt.Errorf("create device nodes %s", err) + } + if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil { + return err + } + + // stdin, stdout and stderr could be pointing to /dev/null from parent namespace. + // Re-open them inside this namespace. 
+ if err := reOpenDevNull(rootfs); err != nil { + return fmt.Errorf("Failed to reopen /dev/null %s", err) + } + + if err := setupDevSymlinks(rootfs); err != nil { + return fmt.Errorf("dev symlinks %s", err) + } + + if err := syscall.Chdir(rootfs); err != nil { + return fmt.Errorf("chdir into %s %s", rootfs, err) + } + + if mountConfig.NoPivotRoot { + err = MsMoveRoot(rootfs) + } else { + err = PivotRoot(rootfs) + } + if err != nil { + return err + } + + if mountConfig.ReadonlyFs { + if err := SetReadonly(); err != nil { + return fmt.Errorf("set readonly %s", err) + } + } + + syscall.Umask(0022) + + return nil +} + +// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts +// inside the mount namespace +func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error { + for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly, mountConfig.Mounts) { + if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { + return fmt.Errorf("mkdirall %s %s", m.path, err) + } + if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err) + } + } + return nil +} + +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + } + return nil +} + +func setupDevSymlinks(rootfs string) error { + var links = [][2]string{ + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } + + // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink + // in /dev if it exists in /proc. 
+ if _, err := os.Stat("/proc/kcore"); err == nil { + links = append(links, [2]string{"/proc/kcore", "/dev/kcore"}) + } + + for _, link := range links { + var ( + src = link[0] + dst = filepath.Join(rootfs, link[1]) + ) + + if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { + return fmt.Errorf("symlink %s %s %s", src, dst, err) + } + } + + return nil +} + +func setupBindmounts(rootfs string, mountConfig *MountConfig) error { + bindMounts := mountConfig.Mounts + for _, m := range bindMounts.OfType("bind") { + var ( + flags = syscall.MS_BIND | syscall.MS_REC + dest = filepath.Join(rootfs, m.Destination) + ) + if !m.Writable { + flags = flags | syscall.MS_RDONLY + } + + stat, err := os.Stat(m.Source) + if err != nil { + return err + } + + dest, err = symlink.FollowSymlinkInScope(dest, rootfs) + if err != nil { + return err + } + + if err := createIfNotExists(dest, stat.IsDir()); err != nil { + return fmt.Errorf("Creating new bind-mount target, %s", err) + } + + if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) + } + if !m.Writable { + if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil { + return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err) + } + } + if m.Relabel != "" { + if err := label.Relabel(m.Source, mountConfig.MountLabel, m.Relabel); err != nil { + return fmt.Errorf("relabeling %s to %s %s", m.Source, mountConfig.MountLabel, err) + } + } + if m.Private { + if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { + return fmt.Errorf("mounting %s private %s", dest, err) + } + } + } + return nil +} + +// TODO: this is crappy right now and should be cleaned up with a better way of handling system and +// standard bind mounts allowing them to be more dynamic +func newSystemMounts(rootfs, mountLabel string, sysReadonly bool, mounts Mounts) []mount { + systemMounts := []mount{ + {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, + } + + sysMountFlags := defaultMountFlags + if sysReadonly { + sysMountFlags |= syscall.MS_RDONLY + } + systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags}) + + return systemMounts +} + +// Is stdin, stdout or stderr were to be pointing to '/dev/null', +// this method will make them point to '/dev/null' from within this namespace. 
+func reOpenDevNull(rootfs string) error {
+	var stat, devNullStat syscall.Stat_t
+	file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
+	if err != nil {
+		return fmt.Errorf("Failed to open /dev/null - %s", err)
+	}
+	defer file.Close()
+	if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
+		return fmt.Errorf("Failed to stat /dev/null - %s", err)
+	}
+	for fd := 0; fd < 3; fd++ {
+		if err = syscall.Fstat(fd, &stat); err != nil {
+			return fmt.Errorf("Failed to stat fd %d - %s", fd, err)
+		}
+		if stat.Rdev == devNullStat.Rdev {
+			// Close and re-open the fd.
+			if err = syscall.Dup2(int(file.Fd()), fd); err != nil {
+				return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd, err)
+			}
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go
new file mode 100644
index 00000000..94afd3a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package mount
+
+import (
+	"fmt"
+	"syscall"
+)
+
+func MsMoveRoot(rootfs string) error {
+	if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
+		return fmt.Errorf("mount move %s into / %s", rootfs, err)
+	}
+
+	if err := syscall.Chroot("."); err != nil {
+		return fmt.Errorf("chroot . %s", err)
+	}
+
+	return syscall.Chdir("/")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go
new file mode 100644
index 00000000..6a984e33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package nodes
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/libcontainer/devices"
+)
+
+// Create the device nodes in the container.
+func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
+	oldMask := syscall.Umask(0000)
+	defer syscall.Umask(oldMask)
+
+	for _, node := range nodesToCreate {
+		if err := CreateDeviceNode(rootfs, node); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Creates the device node in the rootfs of the container.
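To make the mknod call in CreateDeviceNode below concrete, here is an editorial sketch that mirrors the Major/Minor/Mkdev helpers from number.go above (the names and package sketch are invented); it shows the encoded device number for /dev/null.

package sketch

import "fmt"

// major, minor and mkdev mirror devices.Major, devices.Minor and devices.Mkdev,
// shown only to make the arithmetic used by the mknod path concrete.
func major(dev int) int64 { return int64((dev >> 8) & 0xfff) }
func minor(dev int) int64 { return int64((dev & 0xff) | ((dev >> 12) & 0xfff00)) }
func mkdev(maj, min int64) int {
	return int((maj << 8) | (min & 0xff) | ((min & 0xfff00) << 12))
}

func exampleDevNull() {
	dev := mkdev(1, 3)                       // /dev/null is the char device 1:3
	fmt.Println(dev, major(dev), minor(dev)) // prints: 259 1 3
}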
+func CreateDeviceNode(rootfs string, node *devices.Device) error { + var ( + dest = filepath.Join(rootfs, node.Path) + parent = filepath.Dir(dest) + ) + + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + + fileMode := node.FileMode + switch node.Type { + case 'c': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + + if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) { + return fmt.Errorf("mknod %s %s", node.Path, err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go new file mode 100644 index 00000000..83660715 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package nodes + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { + return errors.New("Unsupported method") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go new file mode 100644 index 00000000..a88ed4a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go @@ -0,0 +1,34 @@ +// +build linux + +package mount + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +func PivotRoot(rootfs string) error { + pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") + if err != nil { + return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) + } + + if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { + return fmt.Errorf("pivot_root %s", err) + } + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + + // path to pivot dir now changed, update + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("unmount pivot_root dir %s", err) + } + + return os.Remove(pivotDir) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go new file mode 100644 index 00000000..c316481a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go @@ -0,0 +1,30 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/libcontainer/console" +) + +func SetupPtmx(rootfs, consolePath, mountLabel string) error { + ptmx := filepath.Join(rootfs, "dev/ptmx") + if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.Symlink("pts/ptmx", ptmx); err != nil { + return fmt.Errorf("symlink dev ptmx %s", err) + } + + if consolePath != "" { + if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go new file mode 100644 index 00000000..9b4a6f70 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go @@ -0,0 +1,11 @@ +// +build linux + +package 
mount + +import ( + "syscall" +) + +func SetReadonly() error { + return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go new file mode 100644 index 00000000..99a01209 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import "syscall" + +func RemountProc() error { + if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil { + return err + } + + if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { + return err + } + + return nil +} + +func RemountSys() error { + if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil { + if err != syscall.EINVAL { + return err + } + } else { + if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/types.go new file mode 100644 index 00000000..063bbac1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/types.go @@ -0,0 +1,49 @@ +package mount + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +type MountConfig struct { + // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs + // This is a common option when the container is running in ramdisk + NoPivotRoot bool `json:"no_pivot_root,omitempty"` + + // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted + // bind mounts are writtable + ReadonlyFs bool `json:"readonly_fs,omitempty"` + + // Mounts specify additional source and destination paths that will be mounted inside the container's + // rootfs and mount namespace if specified + Mounts Mounts `json:"mounts,omitempty"` + + // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! 
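As an editorial illustration of that note (not part of the patch; the helper name is invented), the devices cgroup rule corresponding to a created node is exactly what Device.GetCgroupAllowString above renders, in "type major:minor permissions" form.

package sketch

import "fmt"

// allowLineForDevNull returns the devices.allow entry that must accompany a
// /dev/null DeviceNode (char 1:3, permissions "rwm"): "c 1:3 rwm".
func allowLineForDevNull() string {
	return fmt.Sprintf("%c %d:%d %s", 'c', 1, 3, "rwm")
}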
+ DeviceNodes []*devices.Device `json:"device_nodes,omitempty"` + + MountLabel string `json:"mount_label,omitempty"` +} + +type Mount struct { + Type string `json:"type,omitempty"` + Source string `json:"source,omitempty"` // Source path, in the host namespace + Destination string `json:"destination,omitempty"` // Destination path, in the container + Writable bool `json:"writable,omitempty"` + Relabel string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared + Private bool `json:"private,omitempty"` +} + +type Mounts []Mount + +var ErrUnsupported = errors.New("Unsupported method") + +func (s Mounts) OfType(t string) Mounts { + out := Mounts{} + for _, m := range s { + if m.Type == t { + out = append(out, m) + } + } + return out +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go new file mode 100644 index 00000000..15a844bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go @@ -0,0 +1,10 @@ +package namespaces + +import ( + "os" + "os/exec" + + "github.com/docker/libcontainer" +) + +type CreateCommand func(container *libcontainer.Config, console, rootfs, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go new file mode 100644 index 00000000..6f3838fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go @@ -0,0 +1,194 @@ +// +build linux + +package namespaces + +import ( + "io" + "os" + "os/exec" + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + "github.com/docker/libcontainer/network" + "github.com/docker/libcontainer/syncpipe" + "github.com/docker/libcontainer/system" +) + +// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work. +// Move this to libcontainer package. +// Exec performs setup outside of a namespace so that a container can be +// executed. Exec is a high level function for working with container namespaces. 
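Before the Exec implementation that follows, an editorial sketch of how a caller might drive it; runShell and package sketch are invented names, and config, rootfs and dataPath are assumed to be prepared elsewhere.

package sketch

import (
	"os"

	"github.com/docker/libcontainer"
	"github.com/docker/libcontainer/namespaces"
)

// runShell starts /bin/sh inside the container described by config, using the
// DefaultCreateCommand defined further down, and returns its exit status.
func runShell(config *libcontainer.Config, rootfs, dataPath string) (int, error) {
	return namespaces.Exec(
		config,
		os.Stdin, os.Stdout, os.Stderr,
		"",       // console path; empty means no tty is allocated
		rootfs,   // resolved rootfs of the container
		dataPath, // directory where state.json is written
		[]string{"/bin/sh"},
		namespaces.DefaultCreateCommand,
		nil, // optional startCallback
	)
}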
+func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console string, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { + var ( + err error + ) + + // create a pipe so that we can syncronize with the namespaced process and + // pass the veth name to the child + syncPipe, err := syncpipe.NewSyncPipe() + if err != nil { + return -1, err + } + defer syncPipe.Close() + + command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.Child(), args) + // Note: these are only used in non-tty mode + // if there is a tty for the container it will be opened within the namespace and the + // fds will be duped to stdin, stdiout, and stderr + command.Stdin = stdin + command.Stdout = stdout + command.Stderr = stderr + + if err := command.Start(); err != nil { + return -1, err + } + + // Now we passed the pipe to the child, close our side + syncPipe.CloseChild() + + started, err := system.GetProcessStartTime(command.Process.Pid) + if err != nil { + return -1, err + } + + // Do this before syncing with child so that no children + // can escape the cgroup + cleaner, err := SetupCgroups(container, command.Process.Pid) + if err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + if cleaner != nil { + defer cleaner.Cleanup() + } + + var networkState network.NetworkState + if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + + state := &libcontainer.State{ + InitPid: command.Process.Pid, + InitStartTime: started, + NetworkState: networkState, + } + + if err := libcontainer.SaveState(dataPath, state); err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + defer libcontainer.DeleteState(dataPath) + + // Sync with child + if err := syncPipe.ReadFromChild(); err != nil { + command.Process.Kill() + command.Wait() + return -1, err + } + + if startCallback != nil { + startCallback() + } + + if err := command.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + + return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +// +// console: the /dev/console to setup inside the container +// init: the program executed inside the namespaces +// root: the path to the container json file and information +// pipe: sync pipe to synchronize the parent and child processes +// args: the arguments to pass to the container to run as the user's program +func DefaultCreateCommand(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + // get our binary name from arg0 so we can always reexec ourself + env := []string{ + "console=" + console, + "pipe=3", + "data_path=" + dataPath, + } + + /* + TODO: move user and wd into env + if user != "" { + env = append(env, "user="+user) + } + if workingDir != "" { + env = append(env, "wd="+workingDir) + } + */ + + command := exec.Command(init, append([]string{"init"}, args...)...) + // make sure the process is executed inside the context of the rootfs + command.Dir = rootfs + command.Env = append(os.Environ(), env...) 
+ + if command.SysProcAttr == nil { + command.SysProcAttr = &syscall.SysProcAttr{} + } + command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces)) + + command.SysProcAttr.Pdeathsig = syscall.SIGKILL + command.ExtraFiles = []*os.File{pipe} + + return command +} + +// SetupCgroups applies the cgroup restrictions to the process running in the container based +// on the container's configuration +func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) { + if container.Cgroups != nil { + c := container.Cgroups + + if systemd.UseSystemd() { + return systemd.Apply(c, nspid) + } + + return fs.Apply(c, nspid) + } + + return nil, nil +} + +// InitializeNetworking creates the container's network stack outside of the namespace and moves +// interfaces into the container's net namespaces if necessary +func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error { + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil { + return err + } + } + return pipe.SendToChild(networkState) +} + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { + for key, enabled := range namespaces { + if enabled { + if ns := GetNamespace(key); ns != nil { + flag |= ns.Value + } + } + } + return flag +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go new file mode 100644 index 00000000..3a385cb7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go @@ -0,0 +1,118 @@ +// +build linux + +package namespaces + +import ( + "encoding/json" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/system" + "io" + "os" + "os/exec" + "strconv" + "syscall" +) + +// Runs the command under 'args' inside an existing container referred to by 'container'. +// Returns the exitcode of the command upon success and appropriate error on failure. +func RunIn(container *libcontainer.Config, state *libcontainer.State, args []string, nsinitPath string, stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) { + initArgs, err := getNsEnterCommand(strconv.Itoa(state.InitPid), container, console, args) + if err != nil { + return -1, err + } + + cmd := exec.Command(nsinitPath, initArgs...) + // Note: these are only used in non-tty mode + // if there is a tty for the container it will be opened within the namespace and the + // fds will be duped to stdin, stdiout, and stderr + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return -1, err + } + if startCallback != nil { + startCallback(cmd) + } + + if err := cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + + return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// ExecIn uses an existing pid and joins the pid's namespaces with the new command. 
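Before ExecIn below, an editorial sketch that mirrors getNsEnterCommand (defined a little further down) to show the argv that RunIn hands to the nsinit binary when joining an existing container; nsEnterArgs and package sketch are invented names.

package sketch

import "strconv"

// nsEnterArgs builds the nsenter command line: the init PID and serialized
// container config are passed as flags, and everything after "--" is the
// user's command.
func nsEnterArgs(initPid int, containerJSON, console string, args []string) []string {
	out := []string{
		"nsenter",
		"--nspid", strconv.Itoa(initPid),
		"--containerjson", containerJSON,
	}
	if console != "" {
		out = append(out, "--console", console)
	}
	out = append(out, "--")
	return append(out, args...)
}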
+func ExecIn(container *libcontainer.Config, state *libcontainer.State, args []string) error { + // Enter the namespace and then finish setup + args, err := getNsEnterCommand(strconv.Itoa(state.InitPid), container, "", args) + if err != nil { + return err + } + + finalArgs := append([]string{os.Args[0]}, args...) + + if err := system.Execv(finalArgs[0], finalArgs[0:], os.Environ()); err != nil { + return err + } + + panic("unreachable") +} + +func getContainerJson(container *libcontainer.Config) (string, error) { + // TODO(vmarmol): If this gets too long, send it over a pipe to the child. + // Marshall the container into JSON since it won't be available in the namespace. + containerJson, err := json.Marshal(container) + if err != nil { + return "", err + } + return string(containerJson), nil +} + +func getNsEnterCommand(initPid string, container *libcontainer.Config, console string, args []string) ([]string, error) { + containerJson, err := getContainerJson(container) + if err != nil { + return nil, err + } + + out := []string{ + "nsenter", + "--nspid", initPid, + "--containerjson", containerJson, + } + + if console != "" { + out = append(out, "--console", console) + } + out = append(out, "--") + out = append(out, args...) + + return out, nil +} + +// Run a command in a container after entering the namespace. +func NsEnter(container *libcontainer.Config, args []string) error { + // clear the current processes env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + if err := FinalizeNamespace(container); err != nil { + return err + } + + if container.ProcessLabel != "" { + if err := label.SetProcessLabel(container.ProcessLabel); err != nil { + return err + } + } + + if err := system.Execv(args[0], args[0:], container.Env); err != nil { + return err + } + panic("unreachable") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go new file mode 100644 index 00000000..e43db396 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go @@ -0,0 +1,257 @@ +// +build linux + +package namespaces + +import ( + "fmt" + "os" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/user" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/netlink" + "github.com/docker/libcontainer/network" + "github.com/docker/libcontainer/security/capabilities" + "github.com/docker/libcontainer/security/restrict" + "github.com/docker/libcontainer/syncpipe" + "github.com/docker/libcontainer/system" + "github.com/docker/libcontainer/utils" +) + +// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work. +// Move this to libcontainer package. +// Init is the init process that first runs inside a new namespace to setup mounts, users, networking, +// and other options required for the new container. 
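Before the Init implementation that follows, an editorial sketch of the Config.Env format it expects; splitEnv mirrors the parsing done by LoadContainerEnvironment later in this file (the helper name and package sketch are invented).

package sketch

import (
	"fmt"
	"strings"
)

// splitEnv parses one Config.Env entry: it must be "KEY=VALUE", and only the
// first '=' separates the key from the value, so values may contain '='.
func splitEnv(pair string) (key, value string, err error) {
	p := strings.SplitN(pair, "=", 2)
	if len(p) < 2 {
		return "", "", fmt.Errorf("invalid environment %q", pair)
	}
	return p[0], p[1], nil
}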
+func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) { + defer func() { + if err != nil { + syncPipe.ReportChildError(err) + } + }() + + rootfs, err := utils.ResolveRootfs(uncleanRootfs) + if err != nil { + return err + } + + // clear the current processes env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + + // We always read this as it is a way to sync with the parent as well + networkState, err := syncPipe.ReadFromParent() + if err != nil { + return err + } + + if consolePath != "" { + if err := console.OpenAndDup(consolePath); err != nil { + return err + } + } + if _, err := syscall.Setsid(); err != nil { + return fmt.Errorf("setsid %s", err) + } + if consolePath != "" { + if err := system.Setctty(); err != nil { + return fmt.Errorf("setctty %s", err) + } + } + if err := setupNetwork(container, networkState); err != nil { + return fmt.Errorf("setup networking %s", err) + } + if err := setupRoute(container); err != nil { + return fmt.Errorf("setup route %s", err) + } + + label.Init() + + if err := mount.InitializeMountNamespace(rootfs, + consolePath, + container.RestrictSys, + (*mount.MountConfig)(container.MountConfig)); err != nil { + return fmt.Errorf("setup mount namespace %s", err) + } + + if container.Hostname != "" { + if err := syscall.Sethostname([]byte(container.Hostname)); err != nil { + return fmt.Errorf("sethostname %s", err) + } + } + + runtime.LockOSThread() + + if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil { + return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err) + } + + if err := label.SetProcessLabel(container.ProcessLabel); err != nil { + return fmt.Errorf("set process label %s", err) + } + + // TODO: (crosbymichael) make this configurable at the Config level + if container.RestrictSys { + if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil { + return err + } + } + + pdeathSignal, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if err := FinalizeNamespace(container); err != nil { + return fmt.Errorf("finalize namespace %s", err) + } + + // FinalizeNamespace can change user/group which clears the parent death + // signal, so we restore it here. + if err := RestoreParentDeathSignal(pdeathSignal); err != nil { + return fmt.Errorf("restore parent death signal %s", err) + } + + return system.Execv(args[0], args[0:], container.Env) +} + +// RestoreParentDeathSignal sets the parent death signal to old. +func RestoreParentDeathSignal(old int) error { + if old == 0 { + return nil + } + + current, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if old == current { + return nil + } + + if err := system.ParentDeathSignal(uintptr(old)); err != nil { + return fmt.Errorf("set parent death signal %s", err) + } + + // Signal self if parent is already dead. Does nothing if running in a new + // PID namespace, as Getppid will always return 0. 
+ if syscall.Getppid() == 1 { + return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) + } + + return nil +} + +// SetupUser changes the groups, gid, and uid for the user inside the container +func SetupUser(u string) error { + uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid()) + if err != nil { + return fmt.Errorf("get supplementary groups %s", err) + } + + if err := syscall.Setgroups(suppGids); err != nil { + return fmt.Errorf("setgroups %s", err) + } + + if err := syscall.Setgid(gid); err != nil { + return fmt.Errorf("setgid %s", err) + } + + if err := syscall.Setuid(uid); err != nil { + return fmt.Errorf("setuid %s", err) + } + + return nil +} + +// setupVethNetwork uses the Network config if it is not nil to initialize +// the new veth interface inside the container for use by changing the name to eth0 +// setting the MTU and IP address along with the default gateway +func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error { + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + + err1 := strategy.Initialize((*network.Network)(config), networkState) + if err1 != nil { + return err1 + } + } + return nil +} + +func setupRoute(container *libcontainer.Config) error { + for _, config := range container.Routes { + if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil { + return err + } + } + return nil +} + +// FinalizeNamespace drops the caps, sets the correct user +// and working dir, and closes any leaky file descriptors +// before execing the command inside the namespace +func FinalizeNamespace(container *libcontainer.Config) error { + // Ensure that all non-standard fds we may have accidentally + // inherited are marked close-on-exec so they stay out of the + // container + if err := utils.CloseExecFrom(3); err != nil { + return fmt.Errorf("close open file descriptors %s", err) + } + + // drop capabilities in bounding set before changing user + if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { + return fmt.Errorf("drop bounding set %s", err) + } + + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return fmt.Errorf("set keep caps %s", err) + } + + if err := SetupUser(container.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + + if err := system.ClearKeepCaps(); err != nil { + return fmt.Errorf("clear keep caps %s", err) + } + + // drop all other capabilities + if err := capabilities.DropCapabilities(container.Capabilities); err != nil { + return fmt.Errorf("drop capabilities %s", err) + } + + if container.WorkingDir != "" { + if err := syscall.Chdir(container.WorkingDir); err != nil { + return fmt.Errorf("chdir to %s %s", container.WorkingDir, err) + } + } + + return nil +} + +func LoadContainerEnvironment(container *libcontainer.Config) error { + os.Clearenv() + for _, pair := range container.Env { + p := strings.SplitN(pair, "=", 2) + if len(p) < 2 { + return fmt.Errorf("invalid environment '%v'", pair) + } + if err := os.Setenv(p[0], p[1]); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter.go new file mode 100644 index 00000000..cddfa442 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter.go @@ -0,0 +1,223 @@ +// +build linux + +package namespaces + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const kBufSize = 256; + +void get_args(int *argc, char ***argv) { + // Read argv + int fd = open("/proc/self/cmdline", O_RDONLY); + + // Read the whole commandline. + ssize_t contents_size = 0; + ssize_t contents_offset = 0; + char *contents = NULL; + ssize_t bytes_read = 0; + do { + contents_size += kBufSize; + contents = (char *) realloc(contents, contents_size); + bytes_read = read(fd, contents + contents_offset, contents_size - contents_offset); + contents_offset += bytes_read; + } while (bytes_read > 0); + close(fd); + + // Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args. + ssize_t i; + *argc = 0; + for (i = 0; i < contents_offset; i++) { + if (contents[i] == '\0') { + (*argc)++; + } + } + *argv = (char **) malloc(sizeof(char *) * ((*argc) + 1)); + int idx; + for (idx = 0; idx < (*argc); idx++) { + (*argv)[idx] = contents; + contents += strlen(contents) + 1; + } + (*argv)[*argc] = NULL; +} + +// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12) +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14 +#define _GNU_SOURCE +#include +#include "syscall.h" +#ifdef SYS_setns +int setns(int fd, int nstype) { + return syscall(SYS_setns, fd, nstype); +} +#endif +#endif + +void print_usage() { + fprintf(stderr, " nsenter --nspid --containerjson -- cmd1 arg1 arg2...\n"); +} + +void nsenter() { + int argc; + char **argv; + get_args(&argc, &argv); + + // Ignore if this is not for us. + if (argc < 2 || strcmp(argv[1], "nsenter") != 0) { + return; + } + + // USAGE: nsenter ... + if (argc < 6) { + fprintf(stderr, "nsenter: Incorrect usage, not enough arguments\n"); + exit(1); + } + + static const struct option longopts[] = { + { "nspid", required_argument, NULL, 'n' }, + { "containerjson", required_argument, NULL, 'c' }, + { "console", required_argument, NULL, 't' }, + { NULL, 0, NULL, 0 } + }; + + int c; + pid_t init_pid = -1; + char *init_pid_str = NULL; + char *container_json = NULL; + char *console = NULL; + while ((c = getopt_long_only(argc, argv, "n:s:c:", longopts, NULL)) != -1) { + switch (c) { + case 'n': + init_pid_str = optarg; + break; + case 'c': + container_json = optarg; + break; + case 't': + console = optarg; + break; + } + } + + if (container_json == NULL || init_pid_str == NULL) { + print_usage(); + exit(1); + } + + init_pid = strtol(init_pid_str, NULL, 10); + if (errno != 0 || init_pid <= 0) { + fprintf(stderr, "nsenter: Failed to parse PID from \"%s\" with error: \"%s\"\n", init_pid_str, strerror(errno)); + print_usage(); + exit(1); + } + + argc -= 3; + argv += 3; + + if (setsid() == -1) { + fprintf(stderr, "setsid failed. Error: %s\n", strerror(errno)); + exit(1); + } + + // before we setns we need to dup the console + int consolefd = -1; + if (console != NULL) { + consolefd = open(console, O_RDWR); + if (consolefd < 0) { + fprintf(stderr, "nsenter: failed to open console %s %s\n", console, strerror(errno)); + exit(1); + } + } + + // Setns on all supported namespaces. 
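+	// Every file under /proc/<init_pid>/ns is a namespace handle that can be
+	// passed to setns(); the loop below joins each one, skipping ".", ".." and
+	// the user namespace.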
+ char ns_dir[PATH_MAX]; + memset(ns_dir, 0, PATH_MAX); + snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid); + struct dirent *dent; + DIR *dir = opendir(ns_dir); + if (dir == NULL) { + fprintf(stderr, "nsenter: Failed to open directory \"%s\" with error: \"%s\"\n", ns_dir, strerror(errno)); + exit(1); + } + + while((dent = readdir(dir)) != NULL) { + if(strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, "user") == 0) { + continue; + } + + // Get and open the namespace for the init we are joining.. + char buf[PATH_MAX]; + memset(buf, 0, PATH_MAX); + snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, dent->d_name); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + fprintf(stderr, "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", buf, dent->d_name, strerror(errno)); + exit(1); + } + + // Set the namespace. + if (setns(fd, 0) == -1) { + fprintf(stderr, "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", dent->d_name, strerror(errno)); + exit(1); + } + close(fd); + } + closedir(dir); + + // We must fork to actually enter the PID namespace. + int child = fork(); + if (child == 0) { + if (consolefd != -1) { + if (dup2(consolefd, STDIN_FILENO) != 0) { + fprintf(stderr, "nsenter: failed to dup 0 %s\n", strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) { + fprintf(stderr, "nsenter: failed to dup 1 %s\n", strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) { + fprintf(stderr, "nsenter: failed to dup 2 %s\n", strerror(errno)); + exit(1); + } +} + + // Finish executing, let the Go runtime take over. + return; + } else { + // Parent, wait for the child. + int status = 0; + if (waitpid(child, &status, 0) == -1) { + fprintf(stderr, "nsenter: Failed to waitpid with error: \"%s\"\n", strerror(errno)); + exit(1); + } + + // Forward the child's exit code or re-send its death signal. 
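+		// Re-raising the child's terminating signal on ourselves reports the same
+		// signal-death to our own parent; the exit(1) below is only a fallback.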
+ if (WIFEXITED(status)) { + exit(WEXITSTATUS(status)); + } else if (WIFSIGNALED(status)) { + kill(getpid(), WTERMSIG(status)); + } + exit(1); + } + + return; +} + +__attribute__((constructor)) init() { + nsenter(); +} +*/ +import "C" diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types.go new file mode 100644 index 00000000..16ce981e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types.go @@ -0,0 +1,50 @@ +package namespaces + +import "errors" + +type ( + Namespace struct { + Key string `json:"key,omitempty"` + Value int `json:"value,omitempty"` + File string `json:"file,omitempty"` + } + Namespaces []*Namespace +) + +// namespaceList is used to convert the libcontainer types +// into the names of the files located in /proc//ns/* for +// each namespace +var ( + namespaceList = Namespaces{} + ErrUnkownNamespace = errors.New("Unknown namespace") + ErrUnsupported = errors.New("Unsupported method") +) + +func (ns *Namespace) String() string { + return ns.Key +} + +func GetNamespace(key string) *Namespace { + for _, ns := range namespaceList { + if ns.Key == key { + cpy := *ns + return &cpy + } + } + return nil +} + +// Contains returns true if the specified Namespace is +// in the slice +func (n Namespaces) Contains(ns string) bool { + return n.Get(ns) != nil +} + +func (n Namespaces) Get(ns string) *Namespace { + for _, nsp := range n { + if nsp != nil && nsp.Key == ns { + return nsp + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_linux.go new file mode 100644 index 00000000..d3079944 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_linux.go @@ -0,0 +1,16 @@ +package namespaces + +import ( + "syscall" +) + +func init() { + namespaceList = Namespaces{ + {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, + {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, + {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, + {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, + {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, + {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_test.go new file mode 100644 index 00000000..4d0a72c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/types_test.go @@ -0,0 +1,30 @@ +package namespaces + +import ( + "testing" +) + +func TestNamespacesContains(t *testing.T) { + ns := Namespaces{ + GetNamespace("NEWPID"), + GetNamespace("NEWNS"), + GetNamespace("NEWUTS"), + } + + if ns.Contains("NEWNET") { + t.Fatal("namespaces should not contain NEWNET") + } + + if !ns.Contains("NEWPID") { + t.Fatal("namespaces should contain NEWPID but does not") + } + + withNil := Namespaces{ + GetNamespace("UNDEFINED"), // this element will be nil + GetNamespace("NEWPID"), + } + + if !withNil.Contains("NEWPID") { + t.Fatal("namespaces should contain NEWPID but does not") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS new file mode 100644 index 00000000..1cb55136 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. Charmes (@creack) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go new file mode 100644 index 00000000..5cc75625 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go @@ -0,0 +1,23 @@ +// Packet netlink provide access to low level Netlink sockets and messages. +// +// Actual implementations are in: +// netlink_linux.go +// netlink_darwin.go +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrWrongSockType = errors.New("Wrong socket type") + ErrShortResponse = errors.New("Got short response from netlink") +) + +// A Route is a subnet associated with the interface to reach it. +type Route struct { + *net.IPNet + Iface *net.Interface + Default bool +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go new file mode 100644 index 00000000..6b43fdf0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -0,0 +1,1002 @@ +package netlink + +import ( + "encoding/binary" + "fmt" + "net" + "sync/atomic" + "syscall" + "unsafe" +) + +const ( + IFNAMSIZ = 16 + DEFAULT_CHANGE = 0xFFFFFFFF + IFLA_INFO_KIND = 1 + IFLA_INFO_DATA = 2 + VETH_INFO_PEER = 1 + IFLA_NET_NS_FD = 28 + SIOC_BRADDBR = 0x89a0 + SIOC_BRDELBR = 0x89a1 + SIOC_BRADDIF = 0x89a2 +) + +var nextSeqNr uint32 + +type ifreqHwaddr struct { + IfrnName [IFNAMSIZ]byte + IfruHwaddr syscall.RawSockaddr +} + +type ifreqIndex struct { + IfrnName [IFNAMSIZ]byte + IfruIndex int32 +} + +type ifreqFlags struct { + IfrnName [IFNAMSIZ]byte + Ifruflags uint16 +} + +func nativeEndian() binary.ByteOrder { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + return binary.BigEndian + } + return binary.LittleEndian +} + +func getIpFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return syscall.AF_INET + } + if ip.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +type NetlinkRequestData interface { + Len() int + ToWireFormat() []byte +} + +type IfInfomsg struct { + syscall.IfInfomsg +} + +func newIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := newIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +func (msg *IfInfomsg) ToWireFormat() []byte { + native := nativeEndian() + + length := syscall.SizeofIfInfomsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = 0 + native.PutUint16(b[2:4], msg.Type) + native.PutUint32(b[4:8], uint32(msg.Index)) + native.PutUint32(b[8:12], msg.Flags) + native.PutUint32(b[12:16], msg.Change) + return b +} + +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + +type IfAddrmsg struct { + syscall.IfAddrmsg +} + +func newIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +func (msg *IfAddrmsg) ToWireFormat() []byte { + native := nativeEndian() + + length := syscall.SizeofIfAddrmsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Prefixlen + b[2] = msg.Flags + b[3] = msg.Scope + native.PutUint32(b[4:8], msg.Index) + return b +} + +func 
(msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} + +type RtMsg struct { + syscall.RtMsg +} + +func newRtMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } +} + +func (msg *RtMsg) ToWireFormat() []byte { + native := nativeEndian() + + length := syscall.SizeofRtMsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Dst_len + b[2] = msg.Src_len + b[3] = msg.Tos + b[4] = msg.Table + b[5] = msg.Protocol + b[6] = msg.Scope + b[7] = msg.Type + native.PutUint32(b[8:12], msg.Flags) + return b +} + +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) +} + +type RtAttr struct { + syscall.RtAttr + Data []byte + children []NetlinkRequestData +} + +func newRtAttr(attrType int, data []byte) *RtAttr { + return &RtAttr{ + RtAttr: syscall.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} + +func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + attr := newRtAttr(attrType, data) + parent.children = append(parent.children, attr) + return attr +} + +func (a *RtAttr) Len() int { + l := 0 + for _, child := range a.children { + l += child.Len() + syscall.SizeofRtAttr + } + if l == 0 { + l++ + } + return rtaAlignOf(l + len(a.Data)) +} + +func (a *RtAttr) ToWireFormat() []byte { + native := nativeEndian() + + length := a.Len() + buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr)) + + if a.Data != nil { + copy(buf[4:], a.Data) + } else { + next := 4 + for _, child := range a.children { + childBuf := child.ToWireFormat() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + + if l := uint16(rtaAlignOf(length)); l != 0 { + native.PutUint16(buf[0:2], l+1) + } + native.PutUint16(buf[2:4], a.Type) + + return buf +} + +type NetlinkRequest struct { + syscall.NlMsghdr + Data []NetlinkRequestData +} + +func (rr *NetlinkRequest) ToWireFormat() []byte { + native := nativeEndian() + + length := rr.Len + dataBytes := make([][]byte, len(rr.Data)) + for i, data := range rr.Data { + dataBytes[i] = data.ToWireFormat() + length += uint32(len(dataBytes[i])) + } + b := make([]byte, length) + native.PutUint32(b[0:4], length) + native.PutUint16(b[4:6], rr.Type) + native.PutUint16(b[6:8], rr.Flags) + native.PutUint32(b[8:12], rr.Seq) + native.PutUint32(b[12:16], rr.Pid) + + next := 16 + for _, data := range dataBytes { + copy(b[next:], data) + next += len(data) + } + return b +} + +func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { + if data != nil { + rr.Data = append(rr.Data, data) + } +} + +func newNetlinkRequest(proto, flags int) *NetlinkRequest { + return &NetlinkRequest{ + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.NLMSG_HDRLEN), + Type: uint16(proto), + Flags: syscall.NLM_F_REQUEST | uint16(flags), + Seq: atomic.AddUint32(&nextSeqNr, 1), + }, + } +} + +type NetlinkSocket struct { + fd int + lsa syscall.SockaddrNetlink +} + +func getNetlinkSocket() (*NetlinkSocket, error) { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: fd, + } + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) + return nil, err + } + + return s, nil +} + +func (s *NetlinkSocket) Close() { + 
syscall.Close(s.fd) +} + +func (s *NetlinkSocket) Send(request *NetlinkRequest) error { + if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { + return err + } + return nil +} + +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(s.fd, rb, 0) + if err != nil { + return nil, err + } + if nr < syscall.NLMSG_HDRLEN { + return nil, ErrShortResponse + } + rb = rb[:nr] + return syscall.ParseNetlinkMessage(rb) +} + +func (s *NetlinkSocket) GetPid() (uint32, error) { + lsa, err := syscall.Getsockname(s.fd) + if err != nil { + return 0, err + } + switch v := lsa.(type) { + case *syscall.SockaddrNetlink: + return v.Pid, nil + } + return 0, ErrWrongSockType +} + +func (s *NetlinkSocket) HandleAck(seq uint32) error { + native := nativeEndian() + + pid, err := s.GetPid() + if err != nil { + return err + } + +done: + for { + msgs, err := s.Receive() + if err != nil { + return err + } + for _, m := range msgs { + if m.Header.Seq != seq { + return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, seq) + } + if m.Header.Pid != pid { + return fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + break done + } + return syscall.Errno(-error) + } + } + } + + return nil +} + +// Add a new route table entry. +func AddRoute(destination, source, gateway, device string) error { + if destination == "" && source == "" && gateway == "" { + return fmt.Errorf("one of destination, source or gateway must not be blank") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + msg := newRtMsg() + currentFamily := -1 + var rtAttrs []*RtAttr + + if destination != "" { + destIP, destNet, err := net.ParseCIDR(destination) + if err != nil { + return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) + } + destFamily := getIpFamily(destIP) + currentFamily = destFamily + destLen, bits := destNet.Mask.Size() + if destLen == 0 && bits == 0 { + return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) + } + msg.Family = uint8(destFamily) + msg.Dst_len = uint8(destLen) + var destData []byte + if destFamily == syscall.AF_INET { + destData = destIP.To4() + } else { + destData = destIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) + } + + if source != "" { + srcIP, srcNet, err := net.ParseCIDR(source) + if err != nil { + return fmt.Errorf("source CIDR %s couldn't be parsed", source) + } + srcFamily := getIpFamily(srcIP) + if currentFamily != -1 && currentFamily != srcFamily { + return fmt.Errorf("source and destination ip were not the same IP family") + } + currentFamily = srcFamily + srcLen, bits := srcNet.Mask.Size() + if srcLen == 0 && bits == 0 { + return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source) + } + msg.Family = uint8(srcFamily) + msg.Src_len = uint8(srcLen) + var srcData []byte + if srcFamily == syscall.AF_INET { + srcData = srcIP.To4() + } else { + srcData = srcIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData)) + } + + if gateway != "" { + gwIP := net.ParseIP(gateway) + if gwIP == nil { + return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) + } + 
gwFamily := getIpFamily(gwIP) + if currentFamily != -1 && currentFamily != gwFamily { + return fmt.Errorf("gateway, source, and destination ip were not the same IP family") + } + msg.Family = uint8(gwFamily) + var gwData []byte + if gwFamily == syscall.AF_INET { + gwData = gwIP.To4() + } else { + gwData = gwIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + wb.AddData(msg) + for _, attr := range rtAttrs { + wb.AddData(attr) + } + + var ( + native = nativeEndian() + b = make([]byte, 4) + ) + iface, err := net.InterfaceByName(device) + if err != nil { + return err + } + native.PutUint32(b, uint32(iface.Index)) + + wb.AddData(newRtAttr(syscall.RTA_OIF, b)) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Add a new default gateway. Identical to: +// ip route add default via $ip +func AddDefaultGw(ip, device string) error { + return AddRoute("", "", ip, device) +} + +// Bring up a particular network interface +func NetworkLinkUp(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP + msg.Index = int32(iface.Index) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkLinkDown(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = 0 & ^syscall.IFF_UP + msg.Index = int32(iface.Index) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkSetMTU(iface *net.Interface, mtu int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(mtu)) + + data := newRtAttr(syscall.IFLA_MTU, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// same as ip link set $name master $master +func NetworkSetMaster(iface, master *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(master.Index)) + + data := newRtAttr(syscall.IFLA_MASTER, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = 
syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(nspid)) + + data := newRtAttr(syscall.IFLA_NET_NS_PID, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + var ( + b = make([]byte, 4) + native = nativeEndian() + ) + native.PutUint32(b, uint32(fd)) + + data := newRtAttr(IFLA_NET_NS_FD, b) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Add an Ip address to an interface. This is identical to: +// ip addr add $ip/$ipNet dev $iface +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + family := getIpFamily(ip) + + wb := newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfAddrmsg(family) + msg.Index = uint32(iface.Index) + prefixLen, _ := ipNet.Mask.Size() + msg.Prefixlen = uint8(prefixLen) + wb.AddData(msg) + + var ipData []byte + if family == syscall.AF_INET { + ipData = ip.To4() + } else { + ipData = ip.To16() + } + + localData := newRtAttr(syscall.IFA_LOCAL, ipData) + wb.AddData(localData) + + addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) + wb.AddData(addrData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +func zeroTerminated(s string) []byte { + return []byte(s + "\000") +} + +func nonZeroTerminated(s string) []byte { + return []byte(s) +} + +// Add a new network link of a specified type. 
This is identical to +// running: ip add link $name type $linkType +func NetworkLinkAdd(name string, linkType string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + if name != "" { + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) + wb.AddData(nameData) + } + + kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType)) + + infoData := newRtAttr(syscall.IFLA_LINKINFO, kindData.ToWireFormat()) + wb.AddData(infoData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Returns an array of IPNet for all the currently routed subnets on ipv4 +// This is similar to the first column of "ip route" output +func NetworkGetRoutes() ([]Route, error) { + native := nativeEndian() + + s, err := getNetlinkSocket() + if err != nil { + return nil, err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return nil, err + } + + pid, err := s.GetPid() + if err != nil { + return nil, err + } + + res := make([]Route, 0) + +done: + for { + msgs, err := s.Receive() + if err != nil { + return nil, err + } + for _, m := range msgs { + if m.Header.Seq != wb.Seq { + return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq) + } + if m.Header.Pid != pid { + return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + break done + } + return nil, syscall.Errno(-error) + } + if m.Header.Type != syscall.RTM_NEWROUTE { + continue + } + + var r Route + + msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) + + if msg.Flags&syscall.RTM_F_CLONED != 0 { + // Ignore cloned routes + continue + } + + if msg.Table != syscall.RT_TABLE_MAIN { + // Ignore non-main tables + continue + } + + if msg.Family != syscall.AF_INET { + // Ignore non-ipv4 routes + continue + } + + if msg.Dst_len == 0 { + // Default routes + r.Default = true + } + + attrs, err := syscall.ParseNetlinkRouteAttr(&m) + if err != nil { + return nil, err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_DST: + ip := attr.Value + r.IPNet = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), + } + case syscall.RTA_OIF: + index := int(native.Uint32(attr.Value[0:4])) + r.Iface, _ = net.InterfaceByIndex(index) + } + } + if r.Default || r.IPNet != nil { + res = append(res, r) + } + } + } + + return res, nil +} + +func getIfSocket() (fd int, err error) { + for _, socket := range []int{ + syscall.AF_INET, + syscall.AF_PACKET, + syscall.AF_INET6, + } { + if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { + break + } + } + if err == nil { + return fd, nil + } + return -1, err +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + if len(newName) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", newName) + } + + fd, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(fd) + + data := [IFNAMSIZ * 2]byte{} + // the "-1"s here are very important for ensuring we get proper null + // termination of our new C strings + 
copy(data[:IFNAMSIZ-1], iface.Name) + copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) + + if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { + return errno + } + return nil +} + +func NetworkCreateVethPair(name1, name2 string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) + wb.AddData(nameData) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) + + newIfInfomsgChild(nest3, syscall.AF_UNSPEC) + newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + + wb.AddData(nest1) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. +func CreateBridge(name string, setMacAddr bool) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + if setMacAddr { + return setBridgeMacAddress(s, name) + } + return nil +} + +// Delete the actual bridge device. +func DeleteBridge(name string) error { + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + + var ifr ifreqFlags + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name)) + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + return nil +} + +// Add a slave to abridge device. This is more backward-compatible than +// netlink.NetworkSetMaster and works on RHEL 6. 
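+// It issues the SIOC_BRADDIF ioctl on a plain datagram socket rather than
+// sending an RTM_SETLINK message with an IFLA_MASTER attribute.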
+func AddToBridge(iface, master *net.Interface) error { + if len(master.Name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", master.Name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqIndex{} + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) + ifr.IfruIndex = int32(iface.Index) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + return nil +} + +func setBridgeMacAddress(s int, name string) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + ifr := ifreqHwaddr{} + ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) + + for i := 0; i < 6; i++ { + ifr.IfruHwaddr.Data[i] = randIfrDataByte() + } + + ifr.IfruHwaddr.Data[0] &^= 0x1 // clear multicast bit + ifr.IfruHwaddr.Data[0] |= 0x2 // set local assignment bit (IEEE802) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go new file mode 100644 index 00000000..7789ae27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go @@ -0,0 +1,9 @@ +package netlink + +import ( + "math/rand" +) + +func randIfrDataByte() uint8 { + return uint8(rand.Intn(255)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go new file mode 100644 index 00000000..23c4a927 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go @@ -0,0 +1,11 @@ +// +build !arm + +package netlink + +import ( + "math/rand" +) + +func randIfrDataByte() int8 { + return int8(rand.Intn(255)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go new file mode 100644 index 00000000..ee61d5e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -0,0 +1,55 @@ +package netlink + +import ( + "net" + "testing" +) + +func TestCreateBridgeWithMac(t *testing.T) { + if testing.Short() { + return + } + + name := "testbridge" + + if err := CreateBridge(name, true); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err != nil { + t.Fatal(err) + } + + // cleanup and tests + + if err := DeleteBridge(name); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err == nil { + t.Fatal("expected error getting interface because bridge was deleted") + } +} + +func TestCreateVethPair(t *testing.T) { + if testing.Short() { + return + } + + var ( + name1 = "veth1" + name2 = "veth2" + ) + + if err := NetworkCreateVethPair(name1, name2); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name1); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name2); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go new file mode 
100644 index 00000000..f428933c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go @@ -0,0 +1,76 @@ +// +build !linux + +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +func NetworkGetRoutes() ([]Route, error) { + return nil, ErrNotImplemented +} + +func NetworkLinkAdd(name string, linkType string) error { + return ErrNotImplemented +} + +func NetworkLinkUp(iface *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return ErrNotImplemented +} + +func AddRoute(destination, source, gateway, device string) error { + return ErrNotImplemented +} + +func AddDefaultGw(ip, device string) error { + return ErrNotImplemented +} + +func NetworkSetMTU(iface *net.Interface, mtu int) error { + return ErrNotImplemented +} + +func NetworkCreateVethPair(name1, name2 string) error { + return ErrNotImplemented +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + return ErrNotImplemented +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + return ErrNotImplemented +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + return ErrNotImplemented +} + +func NetworkSetMaster(iface, master *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkDown(iface *net.Interface) error { + return ErrNotImplemented +} + +func CreateBridge(name string, setMacAddr bool) error { + return ErrNotImplemented +} + +func DeleteBridge(name string) error { + return ErrNotImplemented +} + +func AddToBridge(iface, master *net.Interface) error { + return ErrNotImplemented +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go new file mode 100644 index 00000000..1667b4d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go @@ -0,0 +1,23 @@ +// +build linux + +package network + +import ( + "fmt" +) + +// Loopback is a network strategy that provides a basic loopback device +type Loopback struct { +} + +func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error { + return nil +} + +func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error { + // Do not set the MTU on the loopback interface - use the default. 
+ if err := InterfaceUp("lo"); err != nil { + return fmt.Errorf("lo up %s", err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/netns.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/netns.go new file mode 100644 index 00000000..1ff75064 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/netns.go @@ -0,0 +1,37 @@ +// +build linux + +package network + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/libcontainer/system" +) + +// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace +type NetNS struct { +} + +func (v *NetNS) Create(n *Network, nspid int, networkState *NetworkState) error { + networkState.NsPath = n.NsPath + return nil +} + +func (v *NetNS) Initialize(config *Network, networkState *NetworkState) error { + if networkState.NsPath == "" { + return fmt.Errorf("nspath does is not specified in NetworkState") + } + + f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("failed get network namespace fd: %v", err) + } + + if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil { + return fmt.Errorf("failed to setns current network namespace: %v", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go new file mode 100644 index 00000000..48eeec60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go @@ -0,0 +1,81 @@ +// +build linux + +package network + +import ( + "net" + + "github.com/docker/libcontainer/netlink" +) + +func InterfaceUp(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkUp(iface) +} + +func InterfaceDown(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkDown(iface) +} + +func ChangeInterfaceName(old, newName string) error { + iface, err := net.InterfaceByName(old) + if err != nil { + return err + } + return netlink.NetworkChangeName(iface, newName) +} + +func CreateVethPair(name1, name2 string) error { + return netlink.NetworkCreateVethPair(name1, name2) +} + +func SetInterfaceInNamespacePid(name string, nsPid int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetNsPid(iface, nsPid) +} + +func SetInterfaceMaster(name, master string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + masterIface, err := net.InterfaceByName(master) + if err != nil { + return err + } + return netlink.AddToBridge(iface, masterIface) +} + +func SetDefaultGateway(ip, ifaceName string) error { + return netlink.AddDefaultGw(ip, ifaceName) +} + +func SetInterfaceIp(name string, rawIp string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(rawIp) + if err != nil { + return err + } + return netlink.NetworkLinkAddIp(iface, ip, ipNet) +} + +func SetMtu(name string, mtu int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetMTU(iface, mtu) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go new file mode 100644 index 
00000000..c8ece5c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go @@ -0,0 +1,69 @@ +package network + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. +func GetStats(networkState *NetworkState) (*NetworkStats, error) { + // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer. + if networkState.VethHost == "" { + return &NetworkStats{}, nil + } + data, err := readSysfsNetworkStats(networkState.VethHost) + if err != nil { + return nil, err + } + + // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. + return &NetworkStats{ + RxBytes: data["tx_bytes"], + RxPackets: data["tx_packets"], + RxErrors: data["tx_errors"], + RxDropped: data["tx_dropped"], + TxBytes: data["rx_bytes"], + TxPackets: data["rx_packets"], + TxErrors: data["rx_errors"], + TxDropped: data["rx_dropped"], + }, nil +} + +// Reads all the statistics available under /sys/class/net//statistics as a map with file name as key and data as integers. +func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) { + out := make(map[string]uint64) + + fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/") + err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error { + // skip fullPath. + if path == fullPath { + return nil + } + base := filepath.Base(path) + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return err + } + out[base] = value + return nil + }) + return out, err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go new file mode 100644 index 00000000..be5ec93b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go @@ -0,0 +1,35 @@ +// +build linux + +package network + +import ( + "errors" +) + +var ( + ErrNotValidStrategyType = errors.New("not a valid network strategy type") +) + +var strategies = map[string]NetworkStrategy{ + "veth": &Veth{}, + "loopback": &Loopback{}, + "netns": &NetNS{}, +} + +// NetworkStrategy represents a specific network configuration for +// a container's networking stack +type NetworkStrategy interface { + Create(*Network, int, *NetworkState) error + Initialize(*Network, *NetworkState) error +} + +// GetStrategy returns the specific network strategy for the +// provided type. If no strategy is registered for the type an +// ErrNotValidStrategyType is returned. 
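+// For example, GetStrategy("veth") returns the *Veth strategy registered in
+// the strategies map above.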
+func GetStrategy(tpe string) (NetworkStrategy, error) { + s, exists := strategies[tpe] + if !exists { + return nil, ErrNotValidStrategyType + } + return s, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go new file mode 100644 index 00000000..3b7a4e39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go @@ -0,0 +1,41 @@ +package network + +// Network defines configuration for a container's networking stack +// +// The network configuration can be omited from a container causing the +// container to be setup with the host's networking stack +type Network struct { + // Type sets the networks type, commonly veth and loopback + Type string `json:"type,omitempty"` + + // Path to network namespace + NsPath string `json:"ns_path,omitempty"` + + // The bridge to use. + Bridge string `json:"bridge,omitempty"` + + // Prefix for the veth interfaces. + VethPrefix string `json:"veth_prefix,omitempty"` + + // Address contains the IP and mask to set on the network interface + Address string `json:"address,omitempty"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway,omitempty"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + Mtu int `json:"mtu,omitempty"` +} + +// Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers +// Do not depend on it outside of libcontainer. +type NetworkState struct { + // The name of the veth interface on the Host. + VethHost string `json:"veth_host,omitempty"` + // The name of the veth interface created inside the container for the child. + VethChild string `json:"veth_child,omitempty"` + // Net namespace path. 
+ NsPath string `json:"ns_path,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go new file mode 100644 index 00000000..fcafd85c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go @@ -0,0 +1,95 @@ +// +build linux + +package network + +import ( + "fmt" + + "github.com/docker/libcontainer/utils" +) + +// Veth is a network strategy that uses a bridge and creates +// a veth pair, one that stays outside on the host and the other +// is placed inside the container's namespace +type Veth struct { +} + +const defaultDevice = "eth0" + +func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { + var ( + bridge = n.Bridge + prefix = n.VethPrefix + ) + if bridge == "" { + return fmt.Errorf("bridge is not specified") + } + if prefix == "" { + return fmt.Errorf("veth prefix is not specified") + } + name1, name2, err := createVethPair(prefix) + if err != nil { + return err + } + if err := SetInterfaceMaster(name1, bridge); err != nil { + return err + } + if err := SetMtu(name1, n.Mtu); err != nil { + return err + } + if err := InterfaceUp(name1); err != nil { + return err + } + if err := SetInterfaceInNamespacePid(name2, nspid); err != nil { + return err + } + networkState.VethHost = name1 + networkState.VethChild = name2 + + return nil +} + +func (v *Veth) Initialize(config *Network, networkState *NetworkState) error { + var vethChild = networkState.VethChild + if vethChild == "" { + return fmt.Errorf("vethChild is not specified") + } + if err := InterfaceDown(vethChild); err != nil { + return fmt.Errorf("interface down %s %s", vethChild, err) + } + if err := ChangeInterfaceName(vethChild, defaultDevice); err != nil { + return fmt.Errorf("change %s to %s %s", vethChild, defaultDevice, err) + } + if err := SetInterfaceIp(defaultDevice, config.Address); err != nil { + return fmt.Errorf("set %s ip %s", defaultDevice, err) + } + if err := SetMtu(defaultDevice, config.Mtu); err != nil { + return fmt.Errorf("set %s mtu to %d %s", defaultDevice, config.Mtu, err) + } + if err := InterfaceUp(defaultDevice); err != nil { + return fmt.Errorf("%s up %s", defaultDevice, err) + } + if config.Gateway != "" { + if err := SetDefaultGateway(config.Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway to %s on device %s failed with %s", config.Gateway, defaultDevice, err) + } + } + return nil +} + +// createVethPair will automatically generage two random names for +// the veth pair and ensure that they have been created +func createVethPair(prefix string) (name1 string, name2 string, err error) { + name1, err = utils.GenerateRandomName(prefix, 4) + if err != nil { + return + } + name2, err = utils.GenerateRandomName(prefix, 4) + if err != nil { + return + } + if err = CreateVethPair(name1, name2); err != nil { + return + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/cli.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/cli.go new file mode 100644 index 00000000..d4235aef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/cli.go @@ -0,0 +1,42 @@ +package nsinit + +import ( + "log" + "os" + + "github.com/codegangsta/cli" +) + +var logPath = os.Getenv("log") + +func preload(context *cli.Context) error { + if logPath != "" { + if err := openLog(logPath); err != nil { + return err + } + } + + return nil +} + +func NsInit() { + app := cli.NewApp() + 
app.Name = "nsinit" + app.Version = "0.1" + app.Author = "libcontainer maintainers" + + app.Before = preload + app.Commands = []cli.Command{ + execCommand, + initCommand, + statsCommand, + configCommand, + nsenterCommand, + pauseCommand, + unpauseCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go new file mode 100644 index 00000000..5beb04ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go @@ -0,0 +1,29 @@ +package nsinit + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" +) + +var configCommand = cli.Command{ + Name: "config", + Usage: "display the container configuration", + Action: configAction, +} + +func configAction(context *cli.Context) { + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + data, err := json.MarshalIndent(container, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go new file mode 100644 index 00000000..5d410466 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go @@ -0,0 +1,186 @@ +package nsinit + +import ( + "fmt" + "io" + "log" + "os" + "os/exec" + "os/signal" + "syscall" + + "github.com/codegangsta/cli" + "github.com/docker/docker/pkg/term" + "github.com/docker/libcontainer" + consolepkg "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/namespaces" +) + +var execCommand = cli.Command{ + Name: "exec", + Usage: "execute a new command inside a container", + Action: execAction, +} + +func execAction(context *cli.Context) { + var exitCode int + + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil && !os.IsNotExist(err) { + log.Fatalf("unable to read state.json: %s", err) + } + + if state != nil { + exitCode, err = runIn(container, state, []string(context.Args())) + } else { + exitCode, err = startContainer(container, dataPath, []string(context.Args())) + } + + if err != nil { + log.Fatalf("failed to exec: %s", err) + } + + os.Exit(exitCode) +} + +func runIn(container *libcontainer.Config, state *libcontainer.State, args []string) (int, error) { + var ( + master *os.File + console string + err error + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + sigc = make(chan os.Signal, 10) + ) + + signal.Notify(sigc) + + if container.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + log.Fatal(err) + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + log.Fatal(err) + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func(cmd *exec.Cmd) { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.RunIn(container, state, args, os.Args[0], stdin, stdout, stderr, console, startCallback) +} + +// startContainer starts the container. Returns the exit status or -1 and an +// error. 
+// +// Signals sent to the current process will be forwarded to container. +func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) { + var ( + cmd *exec.Cmd + sigc = make(chan os.Signal, 10) + ) + + signal.Notify(sigc) + + createCommand := func(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + cmd = namespaces.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args) + if logPath != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath)) + } + return cmd + } + + var ( + master *os.File + console string + err error + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + ) + + if container.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + return -1, err + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + return -1, err + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func() { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.Exec(container, stdin, stdout, stderr, console, "", dataPath, args, createCommand, startCallback) +} + +func resizeTty(master *os.File) { + if master == nil { + return + } + + ws, err := term.GetWinsize(os.Stdin.Fd()) + if err != nil { + return + } + + if err := term.SetWinsize(master.Fd(), ws); err != nil { + return + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go new file mode 100644 index 00000000..0dd96411 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go @@ -0,0 +1,49 @@ +package nsinit + +import ( + "log" + "os" + "strconv" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +var ( + dataPath = os.Getenv("data_path") + console = os.Getenv("console") + rawPipeFd = os.Getenv("pipe") + + initCommand = cli.Command{ + Name: "init", + Usage: "runs the init process inside the namespace", + Action: initAction, + } +) + +func initAction(context *cli.Context) { + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + pipeFd, err := strconv.Atoi(rawPipeFd) + if err != nil { + log.Fatal(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd)) + if err != nil { + log.Fatalf("unable to create sync pipe: %s", err) + } + + if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go new file mode 100644 index 00000000..11d646eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go @@ -0,0 +1,41 @@ +package nsinit + +import ( + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/namespaces" +) + +var nsenterCommand = cli.Command{ + Name: "nsenter", + Usage: "init process for entering an existing namespace", + Action: nsenterAction, + Flags: 
[]cli.Flag{ + cli.IntFlag{Name: "nspid"}, + cli.StringFlag{Name: "containerjson"}, + cli.StringFlag{Name: "console"}, + }, +} + +func nsenterAction(context *cli.Context) { + args := context.Args() + + if len(args) == 0 { + args = []string{"/bin/bash"} + } + + container, err := loadContainerFromJson(context.String("containerjson")) + if err != nil { + log.Fatalf("unable to load container: %s", err) + } + + nspid := context.Int("nspid") + if nspid <= 0 { + log.Fatalf("cannot enter into namespaces without valid pid: %q", nspid) + } + + if err := namespaces.NsEnter(container, args); err != nil { + log.Fatalf("failed to nsenter: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsinit/nsinit.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsinit/nsinit.go new file mode 100644 index 00000000..816c4da5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsinit/nsinit.go @@ -0,0 +1,7 @@ +package main + +import "github.com/docker/libcontainer/nsinit" + +func main() { + nsinit.NsInit() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go new file mode 100644 index 00000000..492a0e85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go @@ -0,0 +1,49 @@ +package nsinit + +import ( + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" +) + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "pause the container's processes", + Action: pauseAction, +} + +var unpauseCommand = cli.Command{ + Name: "unpause", + Usage: "unpause the container's processes", + Action: unpauseAction, +} + +func pauseAction(context *cli.Context) { + if err := toggle(cgroups.Frozen); err != nil { + log.Fatal(err) + } +} + +func unpauseAction(context *cli.Context) { + if err := toggle(cgroups.Thawed); err != nil { + log.Fatal(err) + } +} + +func toggle(state cgroups.FreezerState) error { + container, err := loadContainer() + if err != nil { + return err + } + + if systemd.UseSystemd() { + err = systemd.Freeze(container.Cgroups, state) + } else { + err = fs.Freeze(container.Cgroups, state) + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go new file mode 100644 index 00000000..3e59305b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go @@ -0,0 +1,39 @@ +package nsinit + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer" +) + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "display statistics for the container", + Action: statsAction, +} + +func statsAction(context *cli.Context) { + container, err := loadContainer() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil { + log.Fatal(err) + } + + stats, err := libcontainer.GetStats(container, state) + if err != nil { + log.Fatal(err) + } + data, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go new file mode 100644 index 00000000..8525ba9a --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go @@ -0,0 +1,46 @@ +package nsinit + +import ( + "encoding/json" + "log" + "os" + "path/filepath" + + "github.com/docker/libcontainer" +) + +func loadContainer() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(dataPath, "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + + return nil +} + +func loadContainerFromJson(rawData string) (*libcontainer.Config, error) { + var container *libcontainer.Config + + if err := json.Unmarshal([]byte(rawData), &container); err != nil { + return nil, err + } + + return container, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/process.go b/Godeps/_workspace/src/github.com/docker/libcontainer/process.go new file mode 100644 index 00000000..489666a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/process.go @@ -0,0 +1,27 @@ +package libcontainer + +import "io" + +// Configuration for a process to be run inside a container. +type ProcessConfig struct { + // The command to be run followed by any arguments. + Args []string + + // Map of environment variables to their values. + Env []string + + // Stdin is a pointer to a reader which provides the standard input stream. + // Stdout is a pointer to a writer which receives the standard output stream. + // Stderr is a pointer to a writer which receives the standard error stream. + // + // If a reader or writer is nil, the input stream is assumed to be empty and the output is + // discarded. + // + // The readers and writers, if supplied, are closed when the process terminates. Their Close + // methods should be idempotent. + // + // Stdout and Stderr may refer to the same writer in which case the output is interspersed. + Stdin io.ReadCloser + Stdout io.WriteCloser + Stderr io.WriteCloser +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md new file mode 100644 index 00000000..4ccc6cde --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md @@ -0,0 +1,5 @@ +These configuration files can be used with `nsinit` to quickly develop, test, +and experiment with features of libcontainer. + +When consuming these configuration files, copy them into your rootfs and rename +the file to `container.json` for use with `nsinit`. 
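
The sample configurations above, the nsinit helpers, and the ProcessConfig type fit together in a fairly simple way: a consumer decodes container.json into a libcontainer.Config (the same pattern as nsinit's loadContainer) and then describes the process it wants to run with a ProcessConfig. The sketch below is not part of this patch; it only illustrates that flow, and the rootfs path and environment values are made up for the example.

    package main

    import (
        "encoding/json"
        "log"
        "os"
        "path/filepath"

        "github.com/docker/libcontainer"
    )

    func main() {
        // Illustrative path only: one of the sample configs copied into a
        // rootfs and renamed to container.json, as the README above describes.
        rootfs := "/containers/demo/rootfs"

        f, err := os.Open(filepath.Join(rootfs, "container.json"))
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Same decoding pattern as nsinit's loadContainer helper.
        var config *libcontainer.Config
        if err := json.NewDecoder(f).Decode(&config); err != nil {
            log.Fatal(err)
        }

        // Describe the process to run inside the container. Nil stdio fields
        // would mean empty input and discarded output, per the ProcessConfig
        // comments; here the caller's own streams are wired through instead.
        process := &libcontainer.ProcessConfig{
            Args:   []string{"/bin/bash"},
            Env:    []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm"},
            Stdin:  os.Stdin,
            Stdout: os.Stdout,
            Stderr: os.Stderr,
        }

        log.Printf("decoded config: %t; prepared process %v", config != nil, process.Args)
    }

Actually starting or entering a container is handled elsewhere in the vendored code (for example, the nsenter command above hands its decoded config to namespaces.NsEnter); this sketch deliberately stops before that step.
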
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json new file mode 100644 index 00000000..f739df10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json @@ -0,0 +1,196 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "apparmor_profile": "docker-default", + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": 
"loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json new file mode 100644 index 00000000..0795e6c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json @@ -0,0 +1,202 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": 
"localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "gateway": "172.17.42.1", + "mtu": 1500, + "type": "veth" + } + ], + "tty": true +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json new file mode 100644 index 00000000..c08c9967 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json @@ -0,0 +1,195 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": 
true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json new file mode 100644 index 00000000..ce383e2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json @@ -0,0 +1,197 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "mount_config": { + "mount_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go new file mode 100644 index 00000000..7aef5fa6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go @@ -0,0 +1,56 @@ +package capabilities + +import ( + "os" + + "github.com/syndtr/gocapability/capability" +) + +const allCapabilityTypes = capability.CAPS | capability.BOUNDS + +// DropBoundingSet drops the capability bounding set to those specified in the +// container configuration. +func DropBoundingSet(capabilities []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capabilities) + c.Clear(capability.BOUNDS) + c.Set(capability.BOUNDS, keep...) + + if err := c.Apply(capability.BOUNDS); err != nil { + return err + } + + return nil +} + +// DropCapabilities drops all capabilities for the current process except those specified in the container configuration. +func DropCapabilities(capList []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capList) + c.Clear(allCapabilityTypes) + c.Set(allCapabilityTypes, keep...) + + if err := c.Apply(allCapabilityTypes); err != nil { + return err + } + return nil +} + +// getEnabledCapabilities returns the capabilities that should not be dropped by the container. 
+func getEnabledCapabilities(capList []string) []capability.Cap { + keep := []capability.Cap{} + for _, capability := range capList { + if c := GetCapability(capability); c != nil { + keep = append(keep, c.Value) + } + } + return keep +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go new file mode 100644 index 00000000..a960b804 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go @@ -0,0 +1,88 @@ +package capabilities + +import "github.com/syndtr/gocapability/capability" + +type ( + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + Capabilities []*CapabilityMapping +) + +func (c *CapabilityMapping) String() string { + return c.Key +} + +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// Contains returns true if the specified Capability is +// in the slice +func (c Capabilities) contains(capp string) bool { + return c.get(capp) != nil +} + +func (c Capabilities) get(capp string) *CapabilityMapping { + for _, cap := range c { + if cap.Key == capp { + return cap + } + } + return nil +} + +var capabilityList = Capabilities{ + {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, + {Key: "MKNOD", Value: capability.CAP_MKNOD}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, + {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, + {Key: "CHOWN", Value: capability.CAP_CHOWN}, + {Key: "NET_RAW", Value: capability.CAP_NET_RAW}, + {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE}, + {Key: "FOWNER", Value: capability.CAP_FOWNER}, + {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH}, + {Key: "FSETID", Value: capability.CAP_FSETID}, + {Key: "KILL", Value: capability.CAP_KILL}, + {Key: "SETGID", Value: capability.CAP_SETGID}, + {Key: "SETUID", Value: capability.CAP_SETUID}, + {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE}, + {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE}, + {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST}, + {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK}, + {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER}, + {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT}, + {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE}, + {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT}, + {Key: "LEASE", Value: capability.CAP_LEASE}, + {Key: "SETFCAP", Value: capability.CAP_SETFCAP}, + {Key: 
"WAKE_ALARM", Value: capability.CAP_WAKE_ALARM}, + {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND}, +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go new file mode 100644 index 00000000..06e8a2b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go @@ -0,0 +1,19 @@ +package capabilities + +import ( + "testing" +) + +func TestCapabilitiesContains(t *testing.T) { + caps := Capabilities{ + GetCapability("MKNOD"), + GetCapability("SETPCAP"), + } + + if caps.contains("SYS_ADMIN") { + t.Fatal("capabilities should not contain SYS_ADMIN") + } + if !caps.contains("MKNOD") { + t.Fatal("capabilities should contain MKNOD but does not") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go new file mode 100644 index 00000000..dd765b1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go @@ -0,0 +1,53 @@ +// +build linux + +package restrict + +import ( + "fmt" + "os" + "syscall" + "time" +) + +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +func mountReadonly(path string) error { + for i := 0; i < 5; i++ { + if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { + switch err { + case syscall.EINVAL: + // Probably not a mountpoint, use bind-mount + if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { + return err + } + + return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") + case syscall.EBUSY: + time.Sleep(100 * time.Millisecond) + continue + default: + return err + } + } + + return nil + } + + return fmt.Errorf("unable to mount %s as readonly max retries reached", path) +} + +// This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). +// However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). 
+func Restrict(mounts ...string) error { + for _, dest := range mounts { + if err := mountReadonly(dest); err != nil { + return fmt.Errorf("unable to remount %s readonly: %s", dest, err) + } + } + + if err := syscall.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go new file mode 100644 index 00000000..464e8d49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package restrict + +import "fmt" + +func Restrict() error { + return fmt.Errorf("not supported") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go new file mode 100644 index 00000000..bfa79578 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go @@ -0,0 +1,442 @@ +// +build linux + +package selinux + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/pkg/mount" + "github.com/docker/libcontainer/system" +) + +const ( + Enforcing = 1 + Permissive = 0 + Disabled = -1 + selinuxDir = "/etc/selinux/" + selinuxConfig = selinuxDir + "config" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + selinuxPath = "/sys/fs/selinux" + xattrNameSelinux = "security.selinux" + stRdOnly = 0x01 +) + +var ( + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`) + mcsList = make(map[string]bool) + selinuxfs = "unknown" + selinuxEnabled = false + selinuxEnabledChecked = false +) + +type SELinuxContext map[string]string + +// SetDisabled disables selinux support for the package +func SetDisabled() { + selinuxEnabled, selinuxEnabledChecked = false, true +} + +func getSelinuxMountPoint() string { + if selinuxfs != "unknown" { + return selinuxfs + } + selinuxfs = "" + + mounts, err := mount.GetMounts() + if err != nil { + return selinuxfs + } + for _, mount := range mounts { + if mount.Fstype == "selinuxfs" { + selinuxfs = mount.Mountpoint + break + } + } + if selinuxfs != "" { + var buf syscall.Statfs_t + syscall.Statfs(selinuxfs, &buf) + if (buf.Flags & stRdOnly) == 1 { + selinuxfs = "" + } + } + return selinuxfs +} + +func SelinuxEnabled() bool { + if selinuxEnabledChecked { + return selinuxEnabled + } + selinuxEnabledChecked = true + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := Getcon(); con != "kernel" { + selinuxEnabled = true + } + } + return selinuxEnabled +} + +func readConfig(target string) (value string) { + var ( + val, key string + bufin *bufio.Reader + ) + + in, err := os.Open(selinuxConfig) + if err != nil { + return "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err != io.EOF { + return "" + } + done = true + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = 
strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == target { + return strings.Trim(val, "\"") + } + } + } + return "" +} + +func getSELinuxPolicyRoot() string { + return selinuxDir + readConfig(selinuxTypeTag) +} + +func readCon(name string) (string, error) { + var val string + + in, err := os.Open(name) + if err != nil { + return "", err + } + defer in.Close() + + _, err = fmt.Fscanf(in, "%s", &val) + return val, err +} + +func Setfilecon(path string, scon string) error { + return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0) +} + +// Return the SELinux label for this path +func Getfilecon(path string) (string, error) { + con, err := system.Lgetxattr(path, xattrNameSelinux) + return string(con), err +} + +func Setfscreatecon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon) +} + +func Getfscreatecon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid())) +} + +// Return the SELinux label of the current process thread. +func Getcon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid())) +} + +func Getpidcon(pid int) (string, error) { + return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) +} + +func Getexeccon() (string, error) { + return readCon("/proc/self/attr/exec") +} + +func writeCon(name string, val string) error { + if !SelinuxEnabled() { + return nil + } + out, err := os.OpenFile(name, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + return err +} + +func Setexeccon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon) +} + +func (c SELinuxContext) Get() string { + return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) +} + +func NewContext(scon string) SELinuxContext { + c := make(SELinuxContext) + + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + c["level"] = con[3] + } + return c +} + +func ReserveLabel(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsAdd(con[3]) + } +} + +func SelinuxGetEnforce() int { + var enforce int + + enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath)) + if err != nil { + return -1 + } + + enforce, err = strconv.Atoi(string(enforceS)) + if err != nil { + return -1 + } + return enforce +} + +func SelinuxGetEnforceMode() int { + switch readConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) error { + if mcsList[mcs] { + return fmt.Errorf("MCS Label already exists") + } + mcsList[mcs] = true + return nil +} + +func mcsDelete(mcs string) { + mcsList[mcs] = false +} + +func mcsExists(mcs string) bool { + return mcsList[mcs] +} + +func IntToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD = ORD - TIER + TIER -= 1 + } + TIER = SETSIZE - TIER + ORD = ORD + TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + binary.Read(rand.Reader, binary.LittleEndian, &n) + c2 = 
n % catRange + if c1 == c2 { + continue + } else { + if c1 > c2 { + t := c1 + c1 = c2 + c2 = t + } + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if err := mcsAdd(mcs); err != nil { + continue + } + break + } + return mcs +} + +func FreeLxcContexts(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsDelete(con[3]) + } +} + +func GetLxcContexts() (processLabel string, fileLabel string) { + var ( + val, key string + bufin *bufio.Reader + ) + + if !SelinuxEnabled() { + return "", "" + } + lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot()) + in, err := os.Open(lxcPath) + if err != nil { + return "", "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + goto exit + } + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == "process" { + processLabel = strings.Trim(val, "\"") + } + if key == "file" { + fileLabel = strings.Trim(val, "\"") + } + } + } + + if processLabel == "" || fileLabel == "" { + return "", "" + } + +exit: + // mcs := IntToMcs(os.Getpid(), 1024) + mcs := uniqMcs(1024) + scon := NewContext(processLabel) + scon["level"] = mcs + processLabel = scon.Get() + scon = NewContext(fileLabel) + scon["level"] = mcs + fileLabel = scon.Get() + return processLabel, fileLabel +} + +func SecurityCheckContext(val string) error { + return writeCon(fmt.Sprintf("%s.context", selinuxPath), val) +} + +func CopyLevel(src, dest string) (string, error) { + if !SelinuxEnabled() { + return "", nil + } + if src == "" { + return "", nil + } + if err := SecurityCheckContext(src); err != nil { + return "", err + } + if err := SecurityCheckContext(dest); err != nil { + return "", err + } + scon := NewContext(src) + tcon := NewContext(dest) + mcsDelete(tcon["level"]) + mcsAdd(scon["level"]) + tcon["level"] = scon["level"] + return tcon.Get(), nil +} + +// Prevent users from relabing system files +func badPrefix(fpath string) error { + var badprefixes = []string{"/usr"} + + for _, prefix := range badprefixes { + if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) { + return fmt.Errorf("Relabeling content in %s is not allowed.", prefix) + } + } + return nil +} + +// Change the fpath file object to the SELinux label scon. 
+// If the fpath is a directory and recurse is true Chcon will walk the +// directory tree setting the label +func Chcon(fpath string, scon string, recurse bool) error { + if !SelinuxEnabled() { + return nil + } + if err := badPrefix(fpath); err != nil { + return err + } + callback := func(p string, info os.FileInfo, err error) error { + return Setfilecon(p, scon) + } + + if recurse { + return filepath.Walk(fpath, callback) + } + + return Setfilecon(fpath, scon) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go new file mode 100644 index 00000000..34c34974 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go @@ -0,0 +1,64 @@ +// +build linux + +package selinux_test + +import ( + "os" + "testing" + + "github.com/docker/libcontainer/selinux" +) + +func testSetfilecon(t *testing.T) { + if selinux.SelinuxEnabled() { + tmp := "selinux_test" + out, _ := os.OpenFile(tmp, os.O_WRONLY, 0) + out.Close() + err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0") + if err != nil { + t.Log("Setfilecon failed") + t.Fatal(err) + } + os.Remove(tmp) + } +} + +func TestSELinux(t *testing.T) { + var ( + err error + plabel, flabel string + ) + + if selinux.SelinuxEnabled() { + t.Log("Enabled") + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + t.Log("getenforce ", selinux.SelinuxGetEnforce()) + t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) + pid := os.Getpid() + t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) + err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + err = selinux.Setfscreatecon("") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + t.Log(selinux.Getpidcon(1)) + } else { + t.Log("Disabled") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/state.go b/Godeps/_workspace/src/github.com/docker/libcontainer/state.go new file mode 100644 index 00000000..ee5d14d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/state.go @@ -0,0 +1,74 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/docker/libcontainer/network" +) + +// State represents a running container's state +type State struct { + // InitPid is the init process id in the parent namespace + InitPid int `json:"init_pid,omitempty"` + + // InitStartTime is the init process start time + InitStartTime string `json:"init_start_time,omitempty"` + + // Network runtime state. + NetworkState network.NetworkState `json:"network_state,omitempty"` +} + +// The running state of the container. +type RunState int + +const ( + // The name of the runtime state file + stateFile = "state.json" + + // The container exists and is running. + Running RunState = iota + + // The container exists, it is in the process of being paused. + Pausing + + // The container exists, but all its processes are paused. + Paused + + // The container does not exist. 
+ Destroyed +) + +// SaveState writes the container's runtime state to a state.json file +// in the specified path +func SaveState(basePath string, state *State) error { + f, err := os.Create(filepath.Join(basePath, stateFile)) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(state) +} + +// GetState reads the state.json file for a running container +func GetState(basePath string) (*State, error) { + f, err := os.Open(filepath.Join(basePath, stateFile)) + if err != nil { + return nil, err + } + defer f.Close() + + var state *State + if err := json.NewDecoder(f).Decode(&state); err != nil { + return nil, err + } + + return state, nil +} + +// DeleteState deletes the state.json file +func DeleteState(basePath string) error { + return os.Remove(filepath.Join(basePath, stateFile)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go new file mode 100644 index 00000000..10a21a9e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go @@ -0,0 +1,102 @@ +package syncpipe + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "syscall" + + "github.com/docker/libcontainer/network" +) + +// SyncPipe allows communication to and from the child processes +// to it's parent and allows the two independent processes to +// syncronize their state. +type SyncPipe struct { + parent, child *os.File +} + +func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) { + s := &SyncPipe{} + + if parentFd > 0 { + s.parent = os.NewFile(parentFd, "parentPipe") + } else if childFd > 0 { + s.child = os.NewFile(childFd, "childPipe") + } else { + return nil, fmt.Errorf("no valid sync pipe fd specified") + } + + return s, nil +} + +func (s *SyncPipe) Child() *os.File { + return s.child +} + +func (s *SyncPipe) Parent() *os.File { + return s.parent +} + +func (s *SyncPipe) SendToChild(networkState *network.NetworkState) error { + data, err := json.Marshal(networkState) + if err != nil { + return err + } + + s.parent.Write(data) + + return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR) +} + +func (s *SyncPipe) ReadFromChild() error { + data, err := ioutil.ReadAll(s.parent) + if err != nil { + return err + } + + if len(data) > 0 { + return fmt.Errorf("%s", data) + } + + return nil +} + +func (s *SyncPipe) ReadFromParent() (*network.NetworkState, error) { + data, err := ioutil.ReadAll(s.child) + if err != nil { + return nil, fmt.Errorf("error reading from sync pipe %s", err) + } + var networkState *network.NetworkState + if len(data) > 0 { + if err := json.Unmarshal(data, &networkState); err != nil { + return nil, err + } + } + return networkState, nil +} + +func (s *SyncPipe) ReportChildError(err error) { + s.child.Write([]byte(err.Error())) + s.CloseChild() +} + +func (s *SyncPipe) Close() error { + if s.parent != nil { + s.parent.Close() + } + + if s.child != nil { + s.child.Close() + } + + return nil +} + +func (s *SyncPipe) CloseChild() { + if s.child != nil { + s.child.Close() + s.child = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go new file mode 100644 index 00000000..bea4b52f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go @@ -0,0 +1,20 @@ +package syncpipe + +import ( + "os" + "syscall" +) + +func NewSyncPipe() 
(s *SyncPipe, err error) { + s = &SyncPipe{} + + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return nil, err + } + + s.child = os.NewFile(uintptr(fds[0]), "child syncpipe") + s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe") + + return s, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go new file mode 100644 index 00000000..3f99a7d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go @@ -0,0 +1,61 @@ +package syncpipe + +import ( + "fmt" + "testing" + + "github.com/docker/libcontainer/network" +) + +func TestSendErrorFromChild(t *testing.T) { + pipe, err := NewSyncPipe() + if err != nil { + t.Fatal(err) + } + defer func() { + if err := pipe.Close(); err != nil { + t.Fatal(err) + } + }() + + expected := "something bad happened" + + pipe.ReportChildError(fmt.Errorf(expected)) + + childError := pipe.ReadFromChild() + if childError == nil { + t.Fatal("expected an error to be returned but did not receive anything") + } + + if childError.Error() != expected { + t.Fatalf("expected %q but received error message %q", expected, childError.Error()) + } +} + +func TestSendPayloadToChild(t *testing.T) { + pipe, err := NewSyncPipe() + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := pipe.Close(); err != nil { + t.Fatal(err) + } + }() + + expected := "libcontainer" + + if err := pipe.SendToChild(&network.NetworkState{VethHost: expected}); err != nil { + t.Fatal(err) + } + + payload, err := pipe.ReadFromParent() + if err != nil { + t.Fatal(err) + } + + if payload.VethHost != expected { + t.Fatalf("expected veth host %q but received %q", expected, payload.VethHost) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go new file mode 100644 index 00000000..c07ef153 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go @@ -0,0 +1,60 @@ +// +build linux + +package system + +import ( + "os/exec" + "syscall" + "unsafe" +) + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + + return syscall.Exec(name, args, env) +} + +func ParentDeathSignal(sig uintptr) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + return err + } + return nil +} + +func GetParentDeathSignal() (int, error) { + var sig int + + _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) + + if err != 0 { + return -1, err + } + + return sig, nil +} + +func SetKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + return err + } + + return nil +} + +func ClearKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + return err + } + + return nil +} + +func Setctty() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go new file mode 100644 index 00000000..37808a29 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go @@ -0,0 +1,27 @@ +package system + +import ( + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// look in /proc to find the process start time so that we can verify +// that this pid has started after ourself +func GetProcessStartTime(pid int) (string, error) { + data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return "", err + } + + parts := strings.Split(string(data), " ") + // the starttime is located at pos 22 + // from the man page + // + // starttime %llu (was %lu before Linux 2.6) + // (22) The time the process started after system boot. In kernels before Linux 2.6, this + // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks + // (divide by sysconf(_SC_CLK_TCK)). + return parts[22-1], nil // starts at 1 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go new file mode 100644 index 00000000..32821ee2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go @@ -0,0 +1,31 @@ +package system + +import ( + "fmt" + "runtime" + "syscall" +) + +// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 +// +// We need different setns values for the different platforms and arch +// We are declaring the macro here because the SETNS syscall does not exist in th stdlib +var setNsMap = map[string]uintptr{ + "linux/386": 346, + "linux/amd64": 308, + "linux/arm": 374, +} + +func Setns(fd uintptr, flags uintptr) error { + ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] + if !exists { + return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + } + + _, _, err := syscall.RawSyscall(ns, fd, flags, 0) + if err != 0 { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go new file mode 100644 index 00000000..3e2f43b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go @@ -0,0 +1,12 @@ +// +build linux,cgo + +package system + +/* +#include +*/ +import "C" + +func GetClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go new file mode 100644 index 00000000..4bbb6989 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go @@ -0,0 +1,8 @@ +// +build linux,!cgo + +package system + +func GetClockTicks() int { + // TODO figure out a better alternative for platforms where we're missing cgo + return 100 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go new file mode 100644 index 00000000..00edb201 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go @@ -0,0 +1,59 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Returns a nil slice and nil error if the xattr is not set +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, 
err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/types.go new file mode 100644 index 00000000..c341137e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/types.go @@ -0,0 +1,11 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/network" +) + +type ContainerStats struct { + NetworkStats *network.NetworkStats `json:"network_stats,omitempty"` + CgroupStats *cgroups.Stats `json:"cgroup_stats,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go new file mode 100644 index 00000000..76184ce0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "crypto/rand" + "encoding/hex" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "syscall" +) + +// GenerateRandomName returns a new name joined with a prefix. 
This size +// specified is used to truncate the randomly generated value +func GenerateRandomName(prefix string, size int) (string, error) { + id := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, id); err != nil { + return "", err + } + return prefix + hex.EncodeToString(id)[:size], nil +} + +// ResolveRootfs ensures that the current working directory is +// not a symlink and returns the absolute path to the rootfs +func ResolveRootfs(uncleanRootfs string) (string, error) { + rootfs, err := filepath.Abs(uncleanRootfs) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(rootfs) +} + +func CloseExecFrom(minFd int) error { + fdList, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return err + } + for _, fi := range fdList { + fd, err := strconv.Atoi(fi.Name()) + if err != nil { + // ignore non-numeric file names + continue + } + + if fd < minFd { + // ignore descriptors lower than our specified minimum + continue + } + + // intentionally ignore errors from syscall.CloseOnExec + syscall.CloseOnExec(fd) + // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml new file mode 100644 index 00000000..cfcd173a --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.1.2 + - 1.2 + - 1.3 + - tip +env: + - GOARCH=amd64 + - GOARCH=386 +install: + - go get -d ./... +script: + - go test ./... diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS new file mode 100644 index 00000000..d42a14a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS @@ -0,0 +1,33 @@ +# This is the official list of go-dockerclient authors for copyright purposes. + +Andrews Medina +Andy Goldstein +Ben McCann +Cezar Sa Espinola +Cheah Chu Yeow +cheneydeng +Ed +Eric Anderson +Flavia Missi +Francisco Souza +Jari Kolehmainen +Jason Wilder +Jean-Baptiste Dalido +Jeff Mitchell +Jeffrey Hulten +Lucas Clemente +Omeid Matten +Paul Morie +Peter Jihoon Kim +Philippe Lafoucrière +Rafe Colton +Salvador Gironès +Simon Eskildsen +Simon Menke +Skolos +Soulou +Sridhar Ratnakumar +Summer Mousa +Tarsis Azevedo +Tim Schindler +Wiliam Souza diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE new file mode 100644 index 00000000..f4130a5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE @@ -0,0 +1,6 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +You can find the Docker license int the following link: +https://raw2.github.com/dotcloud/docker/master/LICENSE diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE new file mode 100644 index 00000000..7a6d8bb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2014, go-dockerclient authors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown new file mode 100644 index 00000000..7b4e959e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown @@ -0,0 +1,41 @@ +#go-dockerclient + +[![Build Status](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest) +[![Build Status](https://travis-ci.org/fsouza/go-dockerclient.png)](https://travis-ci.org/fsouza/go-dockerclient) + +[![GoDoc](http://godoc.org/github.com/fsouza/go-dockerclient?status.png)](http://godoc.org/github.com/fsouza/go-dockerclient) + +This package presents a client for the Docker remote API. + +For more details, check the [remote API documentation](http://docs.docker.io/en/latest/reference/api/docker_remote_api/). + +## Example + + package main + + import ( + "fmt" + "github.com/fsouza/go-dockerclient" + ) + + func main() { + endpoint := "unix:///var/run/docker.sock" + client, _ := docker.NewClient(endpoint) + imgs, _ := client.ListImages(true) + for _, img := range imgs { + fmt.Println("ID: ", img.ID) + fmt.Println("RepoTags: ", img.RepoTags) + fmt.Println("Created: ", img.Created) + fmt.Println("Size: ", img.Size) + fmt.Println("VirtualSize: ", img.VirtualSize) + fmt.Println("ParentId: ", img.ParentId) + fmt.Println("Repository: ", img.Repository) + } + } + +## Developing + +You can run the tests with: + + go get -d ./... + go test ./... diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go new file mode 100644 index 00000000..79260731 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go @@ -0,0 +1,36 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "fmt" + +type ChangeType int + +const ( + ChangeModify ChangeType = iota + ChangeAdd + ChangeDelete +) + +// Change represents a change in a container. +// +// See http://goo.gl/DpGyzK for more details. 
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go new file mode 100644 index 00000000..7c2ec30f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "testing" +) + +func TestChangeString(t *testing.T) { + var tests = []struct { + change Change + expected string + }{ + {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, + {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, + {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, + {Change{"/etc/passwd", 33}, " /etc/passwd"}, + } + for _, tt := range tests { + if got := tt.change.String(); got != tt.expected { + t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got) + } + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go new file mode 100644 index 00000000..681b7bf7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go @@ -0,0 +1,568 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package docker provides a client for the Docker remote API. +// +// See http://goo.gl/mxyql for more details on the remote API. +package docker + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/fsouza/go-dockerclient/utils" +) + +const userAgent = "go-dockerclient" + +var ( + // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. + ErrInvalidEndpoint = errors.New("invalid endpoint") + + // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. + ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") + + apiVersion_1_12, _ = NewApiVersion("1.12") +) + +// ApiVersion is an internal representation of a version of the Remote API. +type ApiVersion []int + +// NewApiVersion returns an instance of ApiVersion for the given string. +// +// The given string must be in the form .., where , +// and are integer numbers. +func NewApiVersion(input string) (ApiVersion, error) { + if !strings.Contains(input, ".") { + return nil, fmt.Errorf("Unable to parse version %q", input) + } + arr := strings.Split(input, ".") + ret := make(ApiVersion, len(arr)) + var err error + for i, val := range arr { + ret[i], err = strconv.Atoi(val) + if err != nil { + return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) + } + } + return ret, nil +} + +func (version ApiVersion) String() string { + var str string + for i, val := range version { + str += strconv.Itoa(val) + if i < len(version)-1 { + str += "." 
+ } + } + return str +} + +func (version ApiVersion) LessThan(other ApiVersion) bool { + return version.compare(other) < 0 +} + +func (version ApiVersion) LessThanOrEqualTo(other ApiVersion) bool { + return version.compare(other) <= 0 +} + +func (version ApiVersion) GreaterThan(other ApiVersion) bool { + return version.compare(other) > 0 +} + +func (version ApiVersion) GreaterThanOrEqualTo(other ApiVersion) bool { + return version.compare(other) >= 0 +} + +func (version ApiVersion) compare(other ApiVersion) int { + for i, v := range version { + if i <= len(other)-1 { + otherVersion := other[i] + + if v < otherVersion { + return -1 + } else if v > otherVersion { + return 1 + } + } + } + if len(version) > len(other) { + return 1 + } + if len(version) < len(other) { + return -1 + } + return 0 +} + +// Client is the basic type of this package. It provides methods for +// interaction with the API. +type Client struct { + SkipServerVersionCheck bool + + endpoint string + endpointURL *url.URL + eventMonitor *eventMonitoringState + client *http.Client + requestedApiVersion ApiVersion + serverApiVersion ApiVersion + expectedApiVersion ApiVersion +} + +// NewClient returns a Client instance ready for communication with the given +// server endpoint. It will use the latest remote API version available in the +// server. +func NewClient(endpoint string) (*Client, error) { + client, err := NewVersionedClient(endpoint, "") + if err != nil { + return nil, err + } + client.SkipServerVersionCheck = true + return client, nil +} + +// NewVersionedClient returns a Client instance ready for communication with +// the given server endpoint, using a specific remote API version. +func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { + u, err := parseEndpoint(endpoint) + if err != nil { + return nil, err + } + + var requestedApiVersion ApiVersion + if strings.Contains(apiVersionString, ".") { + requestedApiVersion, err = NewApiVersion(apiVersionString) + if err != nil { + return nil, err + } + } + + return &Client{ + endpoint: endpoint, + endpointURL: u, + client: http.DefaultClient, + eventMonitor: new(eventMonitoringState), + requestedApiVersion: requestedApiVersion, + }, nil +} + +func (c *Client) checkApiVersion() error { + serverApiVersionString, err := c.getServerApiVersionString() + if err != nil { + return err + } + c.serverApiVersion, err = NewApiVersion(serverApiVersionString) + if err != nil { + return err + } + if c.requestedApiVersion == nil { + c.expectedApiVersion = c.serverApiVersion + } else { + c.expectedApiVersion = c.requestedApiVersion + } + return nil +} + +func parseApiVersionString(input string) (version uint16, err error) { + version = 0 + + if !strings.Contains(input, ".") { + return 0, fmt.Errorf("Unable to parse version '%s'", input) + } + + arr := strings.Split(input, ".") + + major, err := strconv.Atoi(arr[0]) + if err != nil { + return version, err + } + + minor, err := strconv.Atoi(arr[1]) + if err != nil { + return version, err + } + + version = uint16(major)<<8 | uint16(minor) + return version, nil +} + +// Ping pings the docker server +// +// See http://goo.gl/stJENm for more details. 
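+//
+// A minimal usage sketch (the endpoint is illustrative and error handling is
+// elided):
+//
+//	client, err := docker.NewClient("unix:///var/run/docker.sock")
+//	if err != nil {
+//		// handle the error
+//	}
+//	if err := client.Ping(); err != nil {
+//		// the daemon is unreachable or did not answer with HTTP 200
+//	}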
+func (c *Client) Ping() error { + path := "/_ping" + body, status, err := c.do("GET", path, nil) + if err != nil { + return err + } + if status != http.StatusOK { + return newError(status, body) + } + return nil +} + +func (c *Client) getServerApiVersionString() (version string, err error) { + body, status, err := c.do("GET", "/version", nil) + if err != nil { + return "", err + } + if status != http.StatusOK { + return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status) + } + + var versionResponse map[string]string + err = json.Unmarshal(body, &versionResponse) + if err != nil { + return "", err + } + + version = versionResponse["ApiVersion"] + return version, nil +} + +func (c *Client) do(method, path string, data interface{}) ([]byte, int, error) { + var params io.Reader + if data != nil { + buf, err := json.Marshal(data) + if err != nil { + return nil, -1, err + } + params = bytes.NewBuffer(buf) + } + + if path != "/version" && !c.SkipServerVersionCheck && c.expectedApiVersion == nil { + err := c.checkApiVersion() + if err != nil { + return nil, -1, err + } + } + + req, err := http.NewRequest(method, c.getURL(path), params) + if err != nil { + return nil, -1, err + } + req.Header.Set("User-Agent", userAgent) + if data != nil { + req.Header.Set("Content-Type", "application/json") + } else if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + var resp *http.Response + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol == "unix" { + dial, err := net.Dial(protocol, address) + if err != nil { + return nil, -1, err + } + defer dial.Close() + clientconn := httputil.NewClientConn(dial, nil) + resp, err = clientconn.Do(req) + if err != nil { + return nil, -1, err + } + defer clientconn.Close() + } else { + resp, err = c.client.Do(req) + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, resp.StatusCode, newError(resp.StatusCode, body) + } + return body, resp.StatusCode, nil +} + +func (c *Client) stream(method, path string, setRawTerminal bool, headers map[string]string, in io.Reader, stdout, stderr io.Writer) error { + if (method == "POST" || method == "PUT") && in == nil { + in = bytes.NewReader(nil) + } + if path != "/version" && !c.SkipServerVersionCheck && c.expectedApiVersion == nil { + err := c.checkApiVersion() + if err != nil { + return err + } + } + req, err := http.NewRequest(method, c.getURL(path), in) + if err != nil { + return err + } + req.Header.Set("User-Agent", userAgent) + if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + for key, val := range headers { + req.Header.Set(key, val) + } + var resp *http.Response + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if stdout == nil { + stdout = ioutil.Discard + } + if stderr == nil { + stderr = ioutil.Discard + } + if protocol == "unix" { + dial, err := net.Dial(protocol, address) + if err != nil { + return err + } + clientconn := httputil.NewClientConn(dial, nil) + resp, err = clientconn.Do(req) + defer clientconn.Close() + } else { + resp, err = c.client.Do(req) + } + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return ErrConnectionRefused + } + return err + } + defer resp.Body.Close() + 
if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return newError(resp.StatusCode, body) + } + if resp.Header.Get("Content-Type") == "application/json" { + dec := json.NewDecoder(resp.Body) + for { + var m jsonMessage + if err := dec.Decode(&m); err == io.EOF { + break + } else if err != nil { + return err + } + if m.Stream != "" { + fmt.Fprint(stdout, m.Stream) + } else if m.Progress != "" { + fmt.Fprintf(stdout, "%s %s\r", m.Status, m.Progress) + } else if m.Error != "" { + return errors.New(m.Error) + } + if m.Status != "" { + fmt.Fprintln(stdout, m.Status) + } + } + } else { + if setRawTerminal { + _, err = io.Copy(stdout, resp.Body) + } else { + _, err = utils.StdCopy(stdout, stderr, resp.Body) + } + return err + } + return nil +} + +func (c *Client) hijack(method, path string, success chan struct{}, setRawTerminal bool, in io.Reader, stderr, stdout io.Writer) error { + if path != "/version" && !c.SkipServerVersionCheck && c.expectedApiVersion == nil { + err := c.checkApiVersion() + if err != nil { + return err + } + } + if stdout == nil { + stdout = ioutil.Discard + } + if stderr == nil { + stderr = ioutil.Discard + } + req, err := http.NewRequest(method, c.getURL(path), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "plain/text") + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" { + protocol = "tcp" + address = c.endpointURL.Host + } + dial, err := net.Dial(protocol, address) + if err != nil { + return err + } + defer dial.Close() + clientconn := httputil.NewClientConn(dial, nil) + clientconn.Do(req) + if success != nil { + success <- struct{}{} + <-success + } + rwc, br := clientconn.Hijack() + var wg sync.WaitGroup + wg.Add(2) + errs := make(chan error, 2) + go func() { + var err error + if setRawTerminal { + _, err = io.Copy(stdout, br) + } else { + _, err = utils.StdCopy(stdout, stderr, br) + } + errs <- err + wg.Done() + }() + go func() { + var err error + if in != nil { + _, err = io.Copy(rwc, in) + } + rwc.(interface { + CloseWrite() error + }).CloseWrite() + errs <- err + wg.Done() + }() + wg.Wait() + close(errs) + if err := <-errs; err != nil { + return err + } + return nil +} + +func (c *Client) getURL(path string) string { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == "unix" { + urlStr = "" + } + + if c.requestedApiVersion != nil { + return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedApiVersion, path) + } else { + return fmt.Sprintf("%s%s", urlStr, path) + } +} + +type jsonMessage struct { + Status string `json:"status,omitempty"` + Progress string `json:"progress,omitempty"` + Error string `json:"error,omitempty"` + Stream string `json:"stream,omitempty"` +} + +func queryString(opts interface{}) string { + if opts == nil { + return "" + } + value := reflect.ValueOf(opts) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + if value.Kind() != reflect.Struct { + return "" + } + items := url.Values(map[string][]string{}) + for i := 0; i < value.NumField(); i++ { + field := value.Type().Field(i) + if field.PkgPath != "" { + continue + } + key := field.Tag.Get("qs") + if key == "" { + key = strings.ToLower(field.Name) + } else if key == "-" { + continue + } + v := value.Field(i) + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + items.Add(key, "1") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.Int() > 0 { + items.Add(key, 
strconv.FormatInt(v.Int(), 10)) + } + case reflect.Float32, reflect.Float64: + if v.Float() > 0 { + items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + } + case reflect.String: + if v.String() != "" { + items.Add(key, v.String()) + } + case reflect.Ptr: + if !v.IsNil() { + if b, err := json.Marshal(v.Interface()); err == nil { + items.Add(key, string(b)) + } + } + } + } + return items.Encode() +} + +// Error represents failures in the API. It represents a failure from the API. +type Error struct { + Status int + Message string +} + +func newError(status int, body []byte) *Error { + return &Error{Status: status, Message: string(body)} +} + +func (e *Error) Error() string { + return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) +} + +func parseEndpoint(endpoint string) (*url.URL, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, ErrInvalidEndpoint + } + if u.Scheme == "tcp" { + u.Scheme = "http" + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" { + return nil, ErrInvalidEndpoint + } + if u.Scheme != "unix" { + _, port, err := net.SplitHostPort(u.Host) + if err != nil { + if e, ok := err.(*net.AddrError); ok { + if e.Err == "missing port in address" { + return u, nil + } + } + return nil, ErrInvalidEndpoint + } + number, err := strconv.ParseInt(port, 10, 64) + if err == nil && number > 0 && number < 65536 { + return u, nil + } + } else { + return u, nil // we don't need port when using a unix socket + } + return nil, ErrInvalidEndpoint +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go new file mode 100644 index 00000000..53edd6c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go @@ -0,0 +1,285 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" +) + +func TestNewAPIClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if client.client != http.DefaultClient { + t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.client) + } + + // test unix socket endpoints + endpoint = "unix:///var/run/docker.sock" + client, err = NewClient(endpoint) + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if !client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be true, got false") + } + if client.requestedApiVersion != nil { + t.Errorf("Expected requestedApiVersion to be nil, got %#v.", client.requestedApiVersion) + } +} + +func TestNewVersionedClient(t *testing.T) { + endpoint := "http://localhost:4243" + client, err := NewVersionedClient(endpoint, "1.12") + if err != nil { + t.Fatal(err) + } + if client.endpoint != endpoint { + t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) + } + if client.client != http.DefaultClient { + t.Errorf("Expected http.Client %#v. 
Got %#v.", http.DefaultClient, client.client) + } + if reqVersion := client.requestedApiVersion.String(); reqVersion != "1.12" { + t.Errorf("Wrong requestApiVersion. Want %q. Got %q.", "1.12", reqVersion) + } + if client.SkipServerVersionCheck { + t.Error("Expected SkipServerVersionCheck to be false, got true") + } +} + +func TestNewClientInvalidEndpoint(t *testing.T) { + cases := []string{ + "htp://localhost:3243", "http://localhost:a", "localhost:8080", + "", "localhost", "http://localhost:8080:8383", "http://localhost:65536", + "https://localhost:-20", + } + for _, c := range cases { + client, err := NewClient(c) + if client != nil { + t.Errorf("Want client for invalid endpoint, got %#v.", client) + } + if !reflect.DeepEqual(err, ErrInvalidEndpoint) { + t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err) + } + } +} + +func TestGetURL(t *testing.T) { + var tests = []struct { + endpoint string + path string + expected string + }{ + {"http://localhost:4243/", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/", "http://localhost:4243/"}, + {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, + {"http://localhost:4243/////", "/", "http://localhost:4243/"}, + {"unix:///var/run/docker.socket", "/containers", "/containers"}, + } + for _, tt := range tests { + client, _ := NewClient(tt.endpoint) + client.endpoint = tt.endpoint + client.SkipServerVersionCheck = true + got := client.getURL(tt.path) + if got != tt.expected { + t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) + } + } +} + +func TestError(t *testing.T) { + err := newError(400, []byte("bad parameter")) + expected := Error{Status: 400, Message: "bad parameter"} + if !reflect.DeepEqual(expected, *err) { + t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) + } + message := "API error (400): bad parameter" + if err.Error() != message { + t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error()) + } +} + +func TestQueryString(t *testing.T) { + v := float32(2.4) + f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) + jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) + var tests = []struct { + input interface{} + want string + }{ + {&ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{All: true}, "all=1"}, + {ListContainersOptions{Before: "something"}, "before=something"}, + {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, + {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, + {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, + {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, + {dumb{T: 10, Y: 10.35000}, "y=10.35"}, + {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, + {nil, ""}, + {10, ""}, + {"not_a_struct", ""}, + } + for _, tt := range tests { + got := queryString(tt.input) + if got != tt.want { + t.Errorf("queryString(%v). Want %q. 
Got %q.", tt.input, tt.want, got) + } + } +} + +func TestNewApiVersionFailures(t *testing.T) { + var tests = []struct { + input string + expectedError string + }{ + {"1-0", `Unable to parse version "1-0"`}, + {"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`}, + } + for _, tt := range tests { + v, err := NewApiVersion(tt.input) + if v != nil { + t.Errorf("Expected version, got %v.", v) + } + if err.Error() != tt.expectedError { + t.Errorf("NewApiVersion(%q): wrong error. Want %q. Got %q", tt.input, tt.expectedError, err.Error()) + } + } +} + +func TestApiVersions(t *testing.T) { + var tests = []struct { + a string + b string + expectedALessThanB bool + expectedALessThanOrEqualToB bool + expectedAGreaterThanB bool + expectedAGreaterThanOrEqualToB bool + }{ + {"1.11", "1.11", false, true, false, true}, + {"1.10", "1.11", true, true, false, false}, + {"1.11", "1.10", false, false, true, true}, + + {"1.9", "1.11", true, true, false, false}, + {"1.11", "1.9", false, false, true, true}, + + {"1.1.1", "1.1", false, false, true, true}, + {"1.1", "1.1.1", true, true, false, false}, + + {"2.1", "1.1.1", false, false, true, true}, + {"2.1", "1.3.1", false, false, true, true}, + {"1.1.1", "2.1", true, true, false, false}, + {"1.3.1", "2.1", true, true, false, false}, + } + + for _, tt := range tests { + a, _ := NewApiVersion(tt.a) + b, _ := NewApiVersion(tt.b) + + if tt.expectedALessThanB && !a.LessThan(b) { + t.Errorf("Expected %#v < %#v", a, b) + } + if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { + t.Errorf("Expected %#v <= %#v", a, b) + } + if tt.expectedAGreaterThanB && !a.GreaterThan(b) { + t.Errorf("Expected %#v > %#v", a, b) + } + if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { + t.Errorf("Expected %#v >= %#v", a, b) + } + } +} + +func TestPing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + err := client.Ping() + if err != nil { + t.Fatal(err) + } +} + +func TestPingFailing(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (500): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +func TestPingFailingWrongStatus(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted} + client := newTestClient(fakeRT) + err := client.Ping() + if err == nil { + t.Fatal("Expected non nil error, got nil") + } + expectedErrMsg := "API error (202): " + if err.Error() != expectedErrMsg { + t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) + } +} + +type FakeRoundTripper struct { + message string + status int + requests []*http.Request +} + +func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + body := strings.NewReader(rt.message) + rt.requests = append(rt.requests, r) + return &http.Response{ + StatusCode: rt.status, + Body: ioutil.NopCloser(body), + }, nil +} + +func (rt *FakeRoundTripper) Reset() { + rt.requests = nil +} + +type person struct { + Name string + Age int `json:"age"` +} + +type dumb struct { + T int `qs:"-"` + v int + W float32 + X int + Y float64 + Z int `qs:"zee"` + Person *person `qs:"p"` +} + +type fakeEndpointURL struct { + Scheme string +} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go new file mode 100644 index 00000000..25e1fad5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go @@ -0,0 +1,633 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// ListContainersOptions specify parameters to the ListContainers function. +// +// See http://goo.gl/QpCnDN for more details. +type ListContainersOptions struct { + All bool + Size bool + Limit int + Since string + Before string +} + +type APIPort struct { + PrivatePort int64 + PublicPort int64 + Type string + IP string +} + +// APIContainers represents a container. +// +// See http://goo.gl/QeFH7U for more details. +type APIContainers struct { + ID string `json:"Id"` + Image string + Command string + Created int64 + Status string + Ports []APIPort + SizeRw int64 + SizeRootFs int64 + Names []string +} + +// ListContainers returns a slice of containers matching the given criteria. +// +// See http://goo.gl/QpCnDN for more details. +func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { + path := "/containers/json?" + queryString(opts) + body, _, err := c.do("GET", path, nil) + if err != nil { + return nil, err + } + var containers []APIContainers + err = json.Unmarshal(body, &containers) + if err != nil { + return nil, err + } + return containers, nil +} + +// Port represents the port number and the protocol, in the form +// /. For example: 80/tcp. +type Port string + +// Port returns the number of the port. +func (p Port) Port() string { + return strings.Split(string(p), "/")[0] +} + +// Proto returns the name of the protocol. +func (p Port) Proto() string { + parts := strings.Split(string(p), "/") + if len(parts) == 1 { + return "tcp" + } + return parts[1] +} + +// State represents the state of a container. +type State struct { + Running bool + Paused bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time +} + +// String returns the string representation of a state. 
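+// It reports "paused" for a paused container, "Up <elapsed>" for a running
+// one, and "Exit <code>" for a container that has stopped.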
+func (s *State) String() string { + if s.Running { + if s.Paused { + return "paused" + } + return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt)) + } + return fmt.Sprintf("Exit %d", s.ExitCode) +} + +type PortBinding struct { + HostIp string + HostPort string +} + +type PortMapping map[string]string + +type NetworkSettings struct { + IPAddress string + IPPrefixLen int + Gateway string + Bridge string + PortMapping map[string]PortMapping + Ports map[Port][]PortBinding +} + +func (settings *NetworkSettings) PortMappingAPI() []APIPort { + var mapping []APIPort + for port, bindings := range settings.Ports { + p, _ := parsePort(port.Port()) + if len(bindings) == 0 { + mapping = append(mapping, APIPort{ + PublicPort: int64(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + p, _ := parsePort(port.Port()) + h, _ := parsePort(binding.HostPort) + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + PublicPort: int64(h), + Type: port.Proto(), + IP: binding.HostIp, + }) + } + } + return mapping +} + +func parsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +type Config struct { + Hostname string + Domainname string + User string + Memory uint64 + MemorySwap uint64 + CpuShares int64 + AttachStdin bool + AttachStdout bool + AttachStderr bool + PortSpecs []string + ExposedPorts map[Port]struct{} + Tty bool + OpenStdin bool + StdinOnce bool + Env []string + Cmd []string + Dns []string // For Docker API v1.9 and below only + Image string + Volumes map[string]struct{} + VolumesFrom string + WorkingDir string + Entrypoint []string + NetworkDisabled bool +} + +type Container struct { + ID string + + Created time.Time + + Path string + Args []string + + Config *Config + State State + Image string + + NetworkSettings *NetworkSettings + + SysInitPath string + ResolvConfPath string + HostnamePath string + HostsPath string + Name string + Driver string + + Volumes map[string]string + VolumesRW map[string]bool + HostConfig *HostConfig +} + +// InspectContainer returns information about a container by its ID. +// +// See http://goo.gl/2o52Sx for more details. +func (c *Client) InspectContainer(id string) (*Container, error) { + path := "/containers/" + id + "/json" + body, status, err := c.do("GET", path, nil) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + if err != nil { + return nil, err + } + var container Container + err = json.Unmarshal(body, &container) + if err != nil { + return nil, err + } + return &container, nil +} + +// ContainerChanges returns changes in the filesystem of the given container. +// +// See http://goo.gl/DpGyzK for more details. +func (c *Client) ContainerChanges(id string) ([]Change, error) { + path := "/containers/" + id + "/changes" + body, status, err := c.do("GET", path, nil) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + if err != nil { + return nil, err + } + var changes []Change + err = json.Unmarshal(body, &changes) + if err != nil { + return nil, err + } + return changes, nil +} + +// CreateContainerOptions specify parameters to the CreateContainer function. +// +// See http://goo.gl/WPPYtB for more details. +type CreateContainerOptions struct { + Name string + Config *Config `qs:"-"` +} + +// CreateContainer creates a new container, returning the container instance, +// or an error in case of failure. +// +// See http://goo.gl/tjihUc for more details. 
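+//
+// A minimal usage sketch (the name, image and command are illustrative; error
+// handling is elided):
+//
+//	container, err := client.CreateContainer(docker.CreateContainerOptions{
+//		Name:   "example",
+//		Config: &docker.Config{Image: "base", Cmd: []string{"date"}},
+//	})
+//	if err != nil {
+//		// err is ErrNoSuchImage when the requested image does not exist
+//	}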
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { + path := "/containers/create?" + queryString(opts) + body, status, err := c.do("POST", path, opts.Config) + if status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if err != nil { + return nil, err + } + var container Container + err = json.Unmarshal(body, &container) + if err != nil { + return nil, err + } + + container.Name = opts.Name + + return &container, nil +} + +type KeyValuePair struct { + Key string + Value string +} + +type HostConfig struct { + Binds []string + ContainerIDFile string + LxcConf []KeyValuePair + Privileged bool + PortBindings map[Port][]PortBinding + Links []string + PublishAllPorts bool + Dns []string // For Docker API v1.10 and above only + DnsSearch []string + VolumesFrom []string + NetworkMode string +} + +// StartContainer starts a container, returning an errror in case of failure. +// +// See http://goo.gl/y5GZlE for more details. +func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { + if hostConfig == nil { + hostConfig = &HostConfig{} + } + path := "/containers/" + id + "/start" + _, status, err := c.do("POST", path, hostConfig) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// StopContainer stops a container, killing it after the given timeout (in +// seconds). +// +// See http://goo.gl/X2mj8t for more details. +func (c *Client) StopContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) + _, status, err := c.do("POST", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// RestartContainer stops a container, killing it after the given timeout (in +// seconds), during the stop process. +// +// See http://goo.gl/zms73Z for more details. +func (c *Client) RestartContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) + _, status, err := c.do("POST", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// PauseContainer pauses the given container. +// +// See http://goo.gl/AM5t42 for more details. +func (c *Client) PauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/pause", id) + _, status, err := c.do("POST", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// UnpauseContainer pauses the given container. +// +// See http://goo.gl/eBrNSL for more details. +func (c *Client) UnpauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/unpause", id) + _, status, err := c.do("POST", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + if err != nil { + return err + } + return nil +} + +// KillContainerOptions represents the set of options that can be used in a +// call to KillContainer. +type KillContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // The signal to send to the container. When omitted, Docker server + // will assume SIGKILL. + Signal Signal +} + +// KillContainer kills a container, returning an error in case of failure. +// +// See http://goo.gl/DPbbBy for more details. +func (c *Client) KillContainer(opts KillContainerOptions) error { + path := "/containers/" + opts.ID + "/kill" + "?" 
+ queryString(opts) + _, status, err := c.do("POST", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.ID} + } + if err != nil { + return err + } + return nil +} + +// RemoveContainerOptions encapsulates options to remove a container. +type RemoveContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // A flag that indicates whether Docker should remove the volumes + // associated to the container. + RemoveVolumes bool `qs:"v"` + + // A flag that indicates whether Docker should remove the container + // even if it is currently running. + Force bool +} + +// RemoveContainer removes a container, returning an error in case of failure. +// +// See http://goo.gl/PBvGdU for more details. +func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { + path := "/containers/" + opts.ID + "?" + queryString(opts) + _, status, err := c.do("DELETE", path, nil) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.ID} + } + if err != nil { + return err + } + return nil +} + +// CopyFromContainerOptions is the set of options that can be used when copying +// files or folders from a container. +// +// See http://goo.gl/mnxRMl for more details. +type CopyFromContainerOptions struct { + OutputStream io.Writer `json:"-"` + Container string `json:"-"` + Resource string +} + +// CopyFromContainer copy files or folders from a container, using a given +// resource. +// +// See http://goo.gl/mnxRMl for more details. +func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + url := fmt.Sprintf("/containers/%s/copy", opts.Container) + body, status, err := c.do("POST", url, opts) + if status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return err + } + io.Copy(opts.OutputStream, bytes.NewBuffer(body)) + return nil +} + +// WaitContainer blocks until the given container stops, return the exit code +// of the container status. +// +// See http://goo.gl/gnHJL2 for more details. +func (c *Client) WaitContainer(id string) (int, error) { + body, status, err := c.do("POST", "/containers/"+id+"/wait", nil) + if status == http.StatusNotFound { + return 0, &NoSuchContainer{ID: id} + } + if err != nil { + return 0, err + } + var r struct{ StatusCode int } + err = json.Unmarshal(body, &r) + if err != nil { + return 0, err + } + return r.StatusCode, nil +} + +// CommitContainerOptions aggregates parameters to the CommitContainer method. +// +// See http://goo.gl/628gxm for more details. +type CommitContainerOptions struct { + Container string + Repository string `qs:"repo"` + Tag string + Message string `qs:"m"` + Author string + Run *Config `qs:"-"` +} + +// CommitContainer creates a new image from a container's changes. +// +// See http://goo.gl/628gxm for more details. +func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { + path := "/commit?" + queryString(opts) + body, status, err := c.do("POST", path, opts.Run) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return nil, err + } + var image Image + err = json.Unmarshal(body, &image) + if err != nil { + return nil, err + } + return &image, nil +} + +// AttachToContainerOptions is the set of options that can be used when +// attaching to a container. +// +// See http://goo.gl/oPzcqH for more details. 
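+//
+// A minimal usage sketch (the container ID is illustrative; error handling is
+// elided):
+//
+//	err := client.AttachToContainer(docker.AttachToContainerOptions{
+//		Container:    "4fa6e0f0c678",
+//		OutputStream: os.Stdout,
+//		Stdout:       true,
+//		Stream:       true,
+//	})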
+type AttachToContainerOptions struct { + Container string `qs:"-"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // Get container logs, sending it to OutputStream. + Logs bool + + // Stream the response? + Stream bool + + // Attach to stdin, and use InputStream. + Stdin bool + + // Attach to stdout, and use OutputStream. + Stdout bool + + // Attach to stderr, and use ErrorStream. + Stderr bool + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// AttachToContainer attaches to a container, using the given options. +// +// See http://goo.gl/oPzcqH for more details. +func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + path := "/containers/" + opts.Container + "/attach?" + queryString(opts) + return c.hijack("POST", path, opts.Success, opts.RawTerminal, opts.InputStream, opts.ErrorStream, opts.OutputStream) +} + +// LogsOptions represents the set of options used when getting logs from a +// container. +// +// See http://goo.gl/rLhKSU for more details. +type LogsOptions struct { + Container string `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + Follow bool + Stdout bool + Stderr bool + Timestamps bool + Tail string + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// Logs gets stdout and stderr logs from the specified container. +// +// See http://goo.gl/rLhKSU for more details. +func (c *Client) Logs(opts LogsOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + if opts.Tail == "" { + opts.Tail = "all" + } + path := "/containers/" + opts.Container + "/logs?" + queryString(opts) + return c.stream("GET", path, opts.RawTerminal, nil, nil, opts.OutputStream, opts.ErrorStream) +} + +// ResizeContainerTTY resizes the terminal to the given height and width. +func (c *Client) ResizeContainerTTY(id string, height, width int) error { + params := make(url.Values) + params.Set("h", strconv.Itoa(height)) + params.Set("w", strconv.Itoa(width)) + _, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), nil) + return err +} + +// ExportContainerOptions is the set of parameters to the ExportContainer +// method. +// +// See http://goo.gl/Lqk0FZ for more details. +type ExportContainerOptions struct { + ID string + OutputStream io.Writer +} + +// ExportContainer export the contents of container id as tar archive +// and prints the exported contents to stdout. +// +// See http://goo.gl/Lqk0FZ for more details. +func (c *Client) ExportContainer(opts ExportContainerOptions) error { + if opts.ID == "" { + return NoSuchContainer{ID: opts.ID} + } + url := fmt.Sprintf("/containers/%s/export", opts.ID) + return c.stream("GET", url, true, nil, nil, opts.OutputStream, nil) +} + +// NoSuchContainer is the error returned when a given container does not exist. 
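+// Its Error method reports the message "No such container: <id>".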
+type NoSuchContainer struct { + ID string +} + +func (err NoSuchContainer) Error() string { + return "No such container: " + err.ID +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go new file mode 100644 index 00000000..fbe9d1f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go @@ -0,0 +1,1281 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func TestStateString(t *testing.T) { + started := time.Now().Add(-3 * time.Hour) + var tests = []struct { + input State + expected string + }{ + {State{Running: true, Paused: true}, "^paused$"}, + {State{Running: true, StartedAt: started}, "^Up 3h.*$"}, + {State{Running: false, ExitCode: 7}, "^Exit 7$"}, + } + for _, tt := range tests { + re := regexp.MustCompile(tt.expected) + if got := tt.input.String(); !re.MatchString(got) { + t.Errorf("State.String(): wrong result. Want %q. Got %q.", tt.expected, got) + } + } +} + +func TestListContainers(t *testing.T) { + jsonContainers := `[ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], + "Status": "Exit 0" + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], + "Created": 1367854152, + "Status": "Exit 0" + } +]` + var expected []APIContainers + err := json.Unmarshal([]byte(jsonContainers), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) + containers, err := client.ListContainers(ListContainersOptions{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(containers, expected) { + t.Errorf("ListContainers: Expected %#v. 
Got %#v.", expected, containers) + } +} + +func TestListContainersParams(t *testing.T) { + var tests = []struct { + input ListContainersOptions + params map[string][]string + }{ + {ListContainersOptions{}, map[string][]string{}}, + {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, + {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, + { + ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, + map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, + }, + } + fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/containers/json")) + for _, tt := range tests { + client.ListContainers(tt.input) + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "GET" { + t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth) + } + fakeRT.Reset() + } +} + +func TestListContainersFailure(t *testing.T) { + var tests = []struct { + status int + message string + }{ + {400, "bad parameter"}, + {500, "internal server error"}, + } + for _, tt := range tests { + client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) + expected := Error{Status: tt.status, Message: tt.message} + containers, err := client.ListContainers(ListContainersOptions{}) + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) + } + if len(containers) > 0 { + t.Errorf("ListContainers failure. Expected empty list. 
Got %#v.", containers) + } + } +} + +func TestInspectContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.087658+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 17179869184, + "MemorySwap": 34359738368, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Image": "base", + "Volumes": {}, + "VolumesFrom": "" + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": null, + "PublishAllPorts": false + } +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + container, err := client.InspectContainer(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*container, expected) { + t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestInspectContainerFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) + expected := Error{Status: 500, Message: "server error"} + container, err := client.InspectContainer("abe033") + if container != nil { + t.Errorf("InspectContainer: Expected container, got %#v", container) + } + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestInspectContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) + container, err := client.InspectContainer("abe033") + if container != nil { + t.Errorf("InspectContainer: Expected container, got %#v", container) + } + expected := &NoSuchContainer{ID: "abe033"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("InspectContainer: Wrong error information. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestContainerChanges(t *testing.T) { + jsonChanges := `[ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } +]` + var expected []Change + err := json.Unmarshal([]byte(jsonChanges), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c678" + changes, err := client.ContainerChanges(id) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(changes, expected) { + t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes) + } + expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes")) + if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { + t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestContainerChangesFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) + expected := Error{Status: 500, Message: "server error"} + changes, err := client.ContainerChanges("abe033") + if changes != nil { + t.Errorf("ContainerChanges: Expected changes, got %#v", changes) + } + if !reflect.DeepEqual(expected, *err.(*Error)) { + t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestContainerChangesNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) + changes, err := client.ContainerChanges("abe033") + if changes != nil { + t.Errorf("ContainerChanges: Expected changes, got %#v", changes) + } + expected := &NoSuchContainer{ID: "abe033"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) + } +} + +func TestCreateContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Warnings": [] +}` + var expected Container + err := json.Unmarshal([]byte(jsonContainer), &expected) + if err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{AttachStdout: true, AttachStdin: true} + opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} + container, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + if container.ID != id { + t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/create")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("CreateContainer: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) + } + var gotBody Config + err = json.NewDecoder(req.Body).Decode(&gotBody) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateContainerImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound}) + config := Config{AttachStdout: true, AttachStdin: true} + container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) + if container != nil { + t.Errorf("CreateContainer: expected container, got %#v.", container) + } + if !reflect.DeepEqual(err, ErrNoSuchImage) { + t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestStartContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StartContainer(id, &HostConfig{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } +} + +func TestStartContainerNilHostConfig(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StartContainer(id, nil) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + expectedContentType := "application/json" + if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { + t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) + } +} + +func TestStartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StartContainer("a2344", &HostConfig{}) + expected := &NoSuchContainer{ID: "a2344"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestStopContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.StopContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestStopContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.StopContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRestartContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.RestartContainer(id, 10) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRestartContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RestartContainer("a2334", 10) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestPauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.PauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.PauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestUnpauseContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.UnpauseContainer(id) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.UnpauseContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestKillContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestKillContainerSignal(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + if signal := req.URL.Query().Get("signal"); signal != "15" { + t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) + } +} + +func TestKillContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.KillContainer(KillContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestRemoveContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "DELETE" { + t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestRemoveContainerRemoveVolumes(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} + err := client.RemoveContainer(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + params := map[string][]string(req.URL.Query()) + expected := map[string][]string{"v": {"1"}} + if !reflect.DeepEqual(params, expected) { + t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. 
Got %#v.", id, expected, params) + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestResizeContainerTTY(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + err := client.ResizeContainerTTY(id, 40, 80) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } + got := map[string][]string(req.URL.Query()) + expectedParams := map[string][]string{ + "w": {"80"}, + "h": {"40"}, + } + if !reflect.DeepEqual(got, expectedParams) { + t.Errorf("Expected %#v, got %#v.", expectedParams, got) + } +} + +func TestWaitContainer(t *testing.T) { + fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} + client := newTestClient(fakeRT) + id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" + status, err := client.WaitContainer(id) + if err != nil { + t.Fatal(err) + } + if status != 56 { + t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) + } + expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) + if gotPath := req.URL.Path; gotPath != expectedURL.Path { + t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.WaitContainer("a2334") + expected := &NoSuchContainer{ID: "a2334"} + if !reflect.DeepEqual(err, expected) { + t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestCommitContainer(t *testing.T) { + response := `{"Id":"596069db4bf5"}` + client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) + id := "596069db4bf5" + image, err := client.CommitContainer(CommitContainerOptions{}) + if err != nil { + t.Fatal(err) + } + if image.ID != id { + t.Errorf("CommitContainer: Wrong image id. Want %q. 
Got %q.", id, image.ID) + } +} + +func TestCommitContainerParams(t *testing.T) { + cfg := Config{Memory: 67108864} + json, _ := json.Marshal(&cfg) + var tests = []struct { + input CommitContainerOptions + params map[string][]string + body []byte + }{ + {CommitContainerOptions{}, map[string][]string{}, nil}, + {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, + { + CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, + map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}}, + nil, + }, + { + CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, + map[string][]string{"container": {"44c004db4b17"}}, + json, + }, + } + fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} + client := newTestClient(fakeRT) + u, _ := url.Parse(client.getURL("/commit")) + for _, tt := range tests { + client.CommitContainer(tt.input) + got := map[string][]string(fakeRT.requests[0].URL.Query()) + if !reflect.DeepEqual(got, tt.params) { + t.Errorf("Expected %#v, got %#v.", tt.params, got) + } + if path := fakeRT.requests[0].URL.Path; path != u.Path { + t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) + } + if meth := fakeRT.requests[0].Method; meth != "POST" { + t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth) + } + if tt.body != nil { + if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { + if bytes.Compare(requestBody, tt.body) != 0 { + t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) + } + } else { + t.Errorf("Error reading request body: %#v", err) + } + } + fakeRT.Reset() + } +} + +func TestCommitContainerFailure(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) + _, err := client.CommitContainer(CommitContainerOptions{}) + if err == nil { + t.Error("Expected non-nil error, got .") + } +} + +func TestCommitContainerNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) + _, err := client.CommitContainer(CommitContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) + } +} + +func TestAttachToContainerLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &buf, + Stdout: true, + Stderr: true, + Logs: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "POST" { + t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/attach")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "logs": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestAttachToContainer(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestAttachToContainerSentinel(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + success := make(chan struct{}) + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + Success: success, + } + go client.AttachToContainer(opts) + success <- <-success +} + +func TestAttachToContainerNilStdout(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: nil, + ErrorStream: &stderr, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestAttachToContainerNilStderr(t *testing.T) { + var reader = strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + InputStream: reader, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: true, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } +} + +func 
TestAttachToContainerRawTerminalFalse(t *testing.T) { + input := strings.NewReader("send value") + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 5} + w.Write(prefix) + w.Write([]byte("hello")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var stdout, stderr bytes.Buffer + opts := AttachToContainerOptions{ + Container: "a123456", + OutputStream: &stdout, + ErrorStream: &stderr, + InputStream: input, + Stdin: true, + Stdout: true, + Stderr: true, + Stream: true, + RawTerminal: false, + } + err := client.AttachToContainer(opts) + if err != nil { + t.Fatal(err) + } + expected := map[string][]string{ + "stdin": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "stream": {"1"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got) + } + t.Log(stderr.String()) + t.Log(stdout.String()) + if stdout.String() != "hello" { + t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stderr.String()) + } +} + +func TestAttachToContainerWithoutContainer(t *testing.T) { + var client Client + err := client.AttachToContainer(AttachToContainerOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) + } +} + +func TestLogs(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"all"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. 
Got %#v.", expectedQs, got) + } +} + +func TestLogsNilStdoutDoesntFail(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsNilStderrDoesntFail(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + opts := LogsOptions{ + Container: "a123456", + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } +} + +func TestLogsSpecifyingTail(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} + w.Write(prefix) + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } + if req.Method != "GET" { + t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/containers/a123456/logs")) + if req.URL.Path != u.Path { + t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQs := map[string][]string{ + "follow": {"1"}, + "stdout": {"1"}, + "stderr": {"1"}, + "timestamps": {"1"}, + "tail": {"100"}, + } + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expectedQs) { + t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) + } +} + +func TestLogsRawTerminal(t *testing.T) { + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("something happened!")) + req = *r + })) + defer server.Close() + client, _ := NewClient(server.URL) + client.SkipServerVersionCheck = true + var buf bytes.Buffer + opts := LogsOptions{ + Container: "a123456", + OutputStream: &buf, + Follow: true, + RawTerminal: true, + Stdout: true, + Stderr: true, + Timestamps: true, + Tail: "100", + } + err := client.Logs(opts) + if err != nil { + t.Fatal(err) + } + expected := "something happened!" + if buf.String() != expected { + t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) + } +} + +func TestLogsNoContainer(t *testing.T) { + var client Client + err := client.Logs(LogsOptions{}) + expected := &NoSuchContainer{ID: ""} + if !reflect.DeepEqual(err, expected) { + t.Errorf("AttachToContainer: wrong error. Want %#v. 
Got %#v.", expected, err) + } +} + +func TestNoSuchContainerError(t *testing.T) { + var err error = &NoSuchContainer{ID: "i345"} + expected := "No such container: i345" + if got := err.Error(); got != expected { + t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) + } +} + +func TestExportContainer(t *testing.T) { + content := "exported container tar content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func TestExportContainerViaUnixSocket(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip("skipping test on %q", runtime.GOOS) + } + content := "exported container tar content" + var buf []byte + out := bytes.NewBuffer(buf) + tempSocket := tempfile("export_socket") + defer os.Remove(tempSocket) + endpoint := "unix://" + tempSocket + u, _ := parseEndpoint(endpoint) + client := Client{ + endpoint: endpoint, + endpointURL: u, + client: http.DefaultClient, + SkipServerVersionCheck: true, + } + listening := make(chan string) + done := make(chan int) + go runStreamConnServer(t, "unix", tempSocket, listening, done) + <-listening // wait for server to start + opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} + err := client.ExportContainer(opts) + <-done // make sure server stopped + if err != nil { + t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) + } +} + +func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) { + defer close(done) + l, err := net.Listen(network, laddr) + if err != nil { + t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) + listening <- "" + return + } + defer l.Close() + listening <- l.Addr().String() + c, err := l.Accept() + if err != nil { + t.Logf("Accept failed: %v", err) + return + } + c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) + c.Close() +} + +func tempfile(filename string) string { + return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid()) +} + +func TestExportContainerNoId(t *testing.T) { + client := Client{} + out := stdoutMock{bytes.NewBufferString("")} + err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) + if err != (NoSuchContainer{}) { + t.Errorf("ExportContainer: wrong error. Want %#v. Got %#v.", NoSuchContainer{}, err) + } +} + +func TestCopyFromContainer(t *testing.T) { + content := "File content" + out := stdoutMock{bytes.NewBufferString(content)} + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + opts := CopyFromContainerOptions{ + Container: "a123456", + OutputStream: out, + } + err := client.CopyFromContainer(opts) + if err != nil { + t.Errorf("CopyFromContainer: caugh error %#v while copying from container, expected nil", err.Error()) + } + if out.String() != content { + t.Errorf("CopyFromContainer: wrong stdout. Want %#v. 
Got %#v.", content, out.String()) + } +} + +func TestCopyFromContainerEmptyContainer(t *testing.T) { + client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) + err := client.CopyFromContainer(CopyFromContainerOptions{}) + _, ok := err.(*NoSuchContainer) + if !ok { + t.Errorf("CopyFromContainer: invalid error returned. Want NoSuchContainer, got %#v.", err) + } +} + +func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { + jsonContainer := `{ + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Warnings": [] +}` + fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} + client := newTestClient(fakeRT) + config := Config{AttachStdout: true, AttachStdin: true} + opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} + container, err := client.CreateContainer(opts) + if err != nil { + t.Fatal(err) + } + if container.Name != "TestCreateContainer" { + t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine.go new file mode 100644 index 00000000..3e4cd577 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine.go @@ -0,0 +1,147 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package engine + +import ( + "fmt" + "github.com/fsouza/go-dockerclient/utils" + "io" + "log" + "os" + "path/filepath" + "runtime" + "strings" +) + +type Handler func(*Job) Status + +var globalHandlers map[string]Handler + +func init() { + globalHandlers = make(map[string]Handler) +} + +func Register(name string, handler Handler) error { + _, exists := globalHandlers[name] + if exists { + return fmt.Errorf("Can't overwrite global handler for command %s", name) + } + globalHandlers[name] = handler + return nil +} + +// The Engine is the core of Docker. +// It acts as a store for *containers*, and allows manipulation of these +// containers by executing *jobs*. +type Engine struct { + root string + handlers map[string]Handler + hack Hack // data for temporary hackery (see hack.go) + id string + Stdout io.Writer + Stderr io.Writer + Stdin io.Reader +} + +func (eng *Engine) Root() string { + return eng.root +} + +func (eng *Engine) Register(name string, handler Handler) error { + eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers) + _, exists := eng.handlers[name] + if exists { + return fmt.Errorf("Can't overwrite handler for command %s", name) + } + eng.handlers[name] = handler + return nil +} + +// New initializes a new engine managing the directory specified at `root`. +// `root` is used to store containers and any other state private to the engine. +// Changing the contents of the root without executing a job will cause unspecified +// behavior. +func New(root string) (*Engine, error) { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { + return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. 
+ // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.8 crashes are clearer. + // For details see http://github.com/dotcloud/docker/issues/407 + if k, err := utils.GetKernelVersion(); err != nil { + log.Printf("WARNING: %s\n", err) + } else { + if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) + } + } + } + + if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Docker makes some assumptions about the "absoluteness" of root + // ... so let's make sure it has no symlinks + if p, err := filepath.Abs(root); err != nil { + log.Fatalf("Unable to get absolute root (%s): %s", root, err) + } else { + root = p + } + if p, err := filepath.EvalSymlinks(root); err != nil { + log.Fatalf("Unable to canonicalize root (%s): %s", root, err) + } else { + root = p + } + + eng := &Engine{ + root: root, + handlers: make(map[string]Handler), + id: utils.RandomString(), + Stdout: os.Stdout, + Stderr: os.Stderr, + Stdin: os.Stdin, + } + // Copy existing global handlers + for k, v := range globalHandlers { + eng.handlers[k] = v + } + return eng, nil +} + +func (eng *Engine) String() string { + return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8]) +} + +// Job creates a new job which can later be executed. +// This function mimics `Command` from the standard os/exec package. +func (eng *Engine) Job(name string, args ...string) *Job { + job := &Job{ + Eng: eng, + Name: name, + Args: args, + Stdin: NewInput(), + Stdout: NewOutput(), + Stderr: NewOutput(), + env: &Env{}, + } + job.Stderr.Add(utils.NopWriteCloser(eng.Stderr)) + handler, exists := eng.handlers[name] + if exists { + job.handler = handler + } + return job +} + +func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { + prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) + return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine_test.go new file mode 100644 index 00000000..dba7b074 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/engine_test.go @@ -0,0 +1,111 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package engine + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +func TestRegister(t *testing.T) { + if err := Register("dummy1", nil); err != nil { + t.Fatal(err) + } + + if err := Register("dummy1", nil); err == nil { + t.Fatalf("Expecting error, got none") + } + + eng := newTestEngine(t) + + //Should fail because global handlers are copied + //at the engine creation + if err := eng.Register("dummy1", nil); err == nil { + t.Fatalf("Expecting error, got none") + } + + if err := eng.Register("dummy2", nil); err != nil { + t.Fatal(err) + } + + if err := eng.Register("dummy2", nil); err == nil { + t.Fatalf("Expecting error, got none") + } +} + +func TestJob(t *testing.T) { + eng := newTestEngine(t) + job1 := eng.Job("dummy1", "--level=awesome") + + if job1.handler != nil { + t.Fatalf("job1.handler should be empty") + } + + h := func(j *Job) Status { + j.Printf("%s\n", j.Name) + return 42 + } + + eng.Register("dummy2", h) + job2 := eng.Job("dummy2", "--level=awesome") + + if job2.handler == nil { + t.Fatalf("job2.handler shouldn't be nil") + } + + if job2.handler(job2) != 42 { + t.Fatalf("handler dummy2 was not found in job2") + } +} + +func TestEngineRoot(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + dir := path.Join(tmp, "dir") + eng, err := New(dir) + if err != nil { + t.Fatal(err) + } + if st, err := os.Stat(dir); err != nil { + t.Fatal(err) + } else if !st.IsDir() { + t.Fatalf("engine.New() created something other than a directory at %s", dir) + } + r := eng.Root() + r, _ = filepath.EvalSymlinks(r) + dir, _ = filepath.EvalSymlinks(dir) + if r != dir { + t.Fatalf("Expected: %v\nReceived: %v", dir, r) + } +} + +func TestEngineString(t *testing.T) { + eng1 := newTestEngine(t) + defer os.RemoveAll(eng1.Root()) + eng2 := newTestEngine(t) + defer os.RemoveAll(eng2.Root()) + s1 := eng1.String() + s2 := eng2.String() + if eng1 == eng2 { + t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) + } +} + +func TestEngineLogf(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + input := "Test log line" + if n, err := eng.Logf("%s\n", input); err != nil { + t.Fatal(err) + } else if n < len(input) { + t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env.go new file mode 100644 index 00000000..00af1862 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env.go @@ -0,0 +1,238 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package engine + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +type Env []string + +func (env *Env) Get(key string) (value string) { + // FIXME: use Map() + for _, kv := range *env { + if strings.Index(kv, "=") == -1 { + continue + } + parts := strings.SplitN(kv, "=", 2) + if parts[0] != key { + continue + } + if len(parts) < 2 { + value = "" + } else { + value = parts[1] + } + } + return +} + +func (env *Env) Exists(key string) bool { + _, exists := env.Map()[key] + return exists +} + +func (env *Env) GetBool(key string) (value bool) { + s := strings.ToLower(strings.Trim(env.Get(key), " \t")) + if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { + return false + } + return true +} + +func (env *Env) SetBool(key string, value bool) { + if value { + env.Set(key, "1") + } else { + env.Set(key, "0") + } +} + +func (env *Env) GetInt(key string) int { + return int(env.GetInt64(key)) +} + +func (env *Env) GetInt64(key string) int64 { + s := strings.Trim(env.Get(key), " \t") + val, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return -1 + } + return val +} + +func (env *Env) SetInt(key string, value int) { + env.Set(key, fmt.Sprintf("%d", value)) +} + +func (env *Env) SetInt64(key string, value int64) { + env.Set(key, fmt.Sprintf("%d", value)) +} + +// Returns nil if key not found +func (env *Env) GetList(key string) []string { + sval := env.Get(key) + if sval == "" { + return nil + } + l := make([]string, 0, 1) + if err := json.Unmarshal([]byte(sval), &l); err != nil { + l = append(l, sval) + } + return l +} + +func (env *Env) GetJson(key string, iface interface{}) error { + sval := env.Get(key) + if sval == "" { + return nil + } + return json.Unmarshal([]byte(sval), iface) +} + +func (env *Env) SetJson(key string, value interface{}) error { + sval, err := json.Marshal(value) + if err != nil { + return err + } + env.Set(key, string(sval)) + return nil +} + +func (env *Env) SetList(key string, value []string) error { + return env.SetJson(key, value) +} + +func (env *Env) Set(key, value string) { + *env = append(*env, key+"="+value) +} + +func NewDecoder(src io.Reader) *Decoder { + return &Decoder{ + json.NewDecoder(src), + } +} + +type Decoder struct { + *json.Decoder +} + +func (decoder *Decoder) Decode() (*Env, error) { + m := make(map[string]interface{}) + if err := decoder.Decoder.Decode(&m); err != nil { + return nil, err + } + env := &Env{} + for key, value := range m { + env.SetAuto(key, value) + } + return env, nil +} + +// DecodeEnv decodes `src` as a json dictionary, and adds +// each decoded key-value pair to the environment. +// +// If `src` cannot be decoded as a json dictionary, an error +// is returned. +func (env *Env) Decode(src io.Reader) error { + m := make(map[string]interface{}) + if err := json.NewDecoder(src).Decode(&m); err != nil { + return err + } + for k, v := range m { + env.SetAuto(k, v) + } + return nil +} + +func (env *Env) SetAuto(k string, v interface{}) { + // FIXME: we fix-convert float values to int, because + // encoding/json decodes integers to float64, but cannot encode them back. 
+ // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) + if fval, ok := v.(float64); ok { + env.SetInt64(k, int64(fval)) + } else if sval, ok := v.(string); ok { + env.Set(k, sval) + } else if val, err := json.Marshal(v); err == nil { + env.Set(k, string(val)) + } else { + env.Set(k, fmt.Sprintf("%v", v)) + } +} + +func (env *Env) Encode(dst io.Writer) error { + m := make(map[string]interface{}) + for k, v := range env.Map() { + var val interface{} + if err := json.Unmarshal([]byte(v), &val); err == nil { + // FIXME: we fix-convert float values to int, because + // encoding/json decodes integers to float64, but cannot encode them back. + // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) + if fval, isFloat := val.(float64); isFloat { + val = int(fval) + } + m[k] = val + } else { + m[k] = v + } + } + if err := json.NewEncoder(dst).Encode(&m); err != nil { + return err + } + return nil +} + +func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { + // FIXME: return the number of bytes written to respect io.WriterTo + return 0, env.Encode(dst) +} + +func (env *Env) Export(dst interface{}) (err error) { + defer func() { + if err != nil { + err = fmt.Errorf("ExportEnv %s", err) + } + }() + var buf bytes.Buffer + // step 1: encode/marshal the env to an intermediary json representation + if err := env.Encode(&buf); err != nil { + return err + } + // step 2: decode/unmarshal the intermediary json into the destination object + if err := json.NewDecoder(&buf).Decode(dst); err != nil { + return err + } + return nil +} + +func (env *Env) Import(src interface{}) (err error) { + defer func() { + if err != nil { + err = fmt.Errorf("ImportEnv: %s", err) + } + }() + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(src); err != nil { + return err + } + if err := env.Decode(&buf); err != nil { + return err + } + return nil +} + +func (env *Env) Map() map[string]string { + m := make(map[string]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = parts[1] + } + return m +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env_test.go new file mode 100644 index 00000000..d77e8e25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/env_test.go @@ -0,0 +1,127 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
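// Editor's note: an illustrative usage sketch, not part of the vendored env.go
// above or of any upstream file. It shows how the engine.Env type just added
// round-trips typed values through its string-backed storage, assuming the
// Godeps/_workspace/src directory is on GOPATH so the import path resolves.
package main

import (
	"fmt"
	"os"

	"github.com/fsouza/go-dockerclient/engine"
)

func main() {
	env := &engine.Env{}
	env.Set("image", "ubuntu:14.04")
	env.SetInt("memory", 512)
	env.SetBool("detach", true)
	env.SetList("cmd", []string{"/bin/sh", "-c", "echo hi"})

	// Typed getters parse the stored "key=value" strings back.
	fmt.Println(env.Get("image"), env.GetInt("memory"), env.GetBool("detach"), env.GetList("cmd"))

	// Encode serializes the whole environment as a single JSON object.
	if err := env.Encode(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}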
+ +package engine + +import ( + "testing" +) + +func TestNewJob(t *testing.T) { + job := mkJob(t, "dummy", "--level=awesome") + if job.Name != "dummy" { + t.Fatalf("Wrong job name: %s", job.Name) + } + if len(job.Args) != 1 { + t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) + } + if job.Args[0] != "--level=awesome" { + t.Fatalf("Wrong job arguments: %s", job.Args[0]) + } +} + +func TestSetenv(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + if val := job.Getenv("foo"); val != "bar" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + + job.Setenv("bar", "") + if val := job.Getenv("bar"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + if val := job.Getenv("nonexistent"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } +} + +func TestSetenvBool(t *testing.T) { + job := mkJob(t, "dummy") + job.SetenvBool("foo", true) + if val := job.GetenvBool("foo"); !val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + job.SetenvBool("bar", false) + if val := job.GetenvBool("bar"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + if val := job.GetenvBool("nonexistent"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } +} + +func TestSetenvInt(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvInt("foo", -42) + if val := job.GetenvInt("foo"); val != -42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + + job.SetenvInt("bar", 42) + if val := job.GetenvInt("bar"); val != 42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + if val := job.GetenvInt("nonexistent"); val != -1 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } +} + +func TestSetenvList(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvList("foo", []string{"bar"}) + if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + + job.SetenvList("bar", nil) + if val := job.GetenvList("bar"); val != nil { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + if val := job.GetenvList("nonexistent"); val != nil { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } +} + +func TestImportEnv(t *testing.T) { + type dummy struct { + DummyInt int + DummyStringArray []string + } + + job := mkJob(t, "dummy") + if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil { + t.Fatal(err) + } + + dmy := dummy{} + if err := job.ExportEnv(&dmy); err != nil { + t.Fatal(err) + } + + if dmy.DummyInt != 42 { + t.Fatalf("Expected 42, got %d", dmy.DummyInt) + } + + if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" { + t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray) + } + +} + +func TestEnviron(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + val, exists := job.Environ()["foo"] + if !exists { + t.Fatalf("foo not found in the environ") + } + if val != "bar" { + t.Fatalf("bar not found in the environ") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/hack.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/hack.go new file mode 100644 index 00000000..7801274f --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/hack.go @@ -0,0 +1,25 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package engine + +type Hack map[string]interface{} + +func (eng *Engine) Hack_GetGlobalVar(key string) interface{} { + if eng.hack == nil { + return nil + } + val, exists := eng.hack[key] + if !exists { + return nil + } + return val +} + +func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) { + if eng.hack == nil { + eng.hack = make(Hack) + } + eng.hack[key] = val +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/helpers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/helpers_test.go new file mode 100644 index 00000000..0e2c1548 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/helpers_test.go @@ -0,0 +1,28 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package engine + +import ( + "io/ioutil" + "testing" +) + +var globalTestID string + +func newTestEngine(t *testing.T) *Engine { + tmp, err := ioutil.TempDir("", "asd") + if err != nil { + t.Fatal(err) + } + eng, err := New(tmp) + if err != nil { + t.Fatal(err) + } + return eng +} + +func mkJob(t *testing.T, name string, args ...string) *Job { + return newTestEngine(t).Job(name, args...) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/http.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/http.go new file mode 100644 index 00000000..fa586c42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/http.go @@ -0,0 +1,44 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package engine + +import ( + "net/http" + "path" +) + +// ServeHTTP executes a job as specified by the http request `r`, and sends the +// result as an http response. +// This method allows an Engine instance to be passed as a standard http.Handler interface. +// +// Note that the protocol used in this methid is a convenience wrapper and is not the canonical +// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, +// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response +// once data has been written to the body, which makes it inconvenient to return metadata such +// as the exit status. +// +func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { + jobName := path.Base(r.URL.Path) + jobArgs, exists := r.URL.Query()["a"] + if !exists { + jobArgs = []string{} + } + w.Header().Set("Job-Name", jobName) + for _, arg := range jobArgs { + w.Header().Add("Job-Args", arg) + } + job := eng.Job(jobName, jobArgs...) + job.Stdout.Add(w) + job.Stderr.Add(w) + // FIXME: distinguish job status from engine error in Run() + // The former should be passed as a special header, the former + // should cause a 500 status + w.WriteHeader(http.StatusOK) + // The exit status cannot be sent reliably with HTTP1, because headers + // can only be sent before the body. 
+ // (we could possibly use http footers via chunked encoding, but I couldn't find + // how to use them in net/http) + job.Run() +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job.go new file mode 100644 index 00000000..53c418ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job.go @@ -0,0 +1,197 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package engine + +import ( + "fmt" + "io" + "strings" + "time" +) + +// A job is the fundamental unit of work in the docker engine. +// Everything docker can do should eventually be exposed as a job. +// For example: execute a process in a container, create a new container, +// download an archive from the internet, serve the http api, etc. +// +// The job API is designed after unix processes: a job has a name, arguments, +// environment variables, standard streams for input, output and error, and +// an exit status which can indicate success (0) or error (anything else). +// +// One slight variation is that jobs report their status as a string. The +// string "0" indicates success, and any other strings indicates an error. +// This allows for richer error reporting. +// +type Job struct { + Eng *Engine + Name string + Args []string + env *Env + Stdout *Output + Stderr *Output + Stdin *Input + handler Handler + status Status + end time.Time + onExit []func() +} + +type Status int + +const ( + StatusOK Status = 0 + StatusErr Status = 1 + StatusNotFound Status = 127 +) + +// Run executes the job and blocks until the job completes. +// If the job returns a failure status, an error is returned +// which includes the status. 
+func (job *Job) Run() error { + // FIXME: make this thread-safe + // FIXME: implement wait + if !job.end.IsZero() { + return fmt.Errorf("%s: job has already completed", job.Name) + } + // Log beginning and end of the job + job.Eng.Logf("+job %s", job.CallString()) + defer func() { + job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) + }() + var errorMessage string + job.Stderr.AddString(&errorMessage) + if job.handler == nil { + job.Errorf("%s: command not found", job.Name) + job.status = 127 + } else { + job.status = job.handler(job) + job.end = time.Now() + } + // Wait for all background tasks to complete + if err := job.Stdout.Close(); err != nil { + return err + } + if err := job.Stderr.Close(); err != nil { + return err + } + if job.status != 0 { + return fmt.Errorf("%s: %s", job.Name, errorMessage) + } + return nil +} + +func (job *Job) CallString() string { + return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", ")) +} + +func (job *Job) StatusString() string { + // If the job hasn't completed, status string is empty + if job.end.IsZero() { + return "" + } + var okerr string + if job.status == StatusOK { + okerr = "OK" + } else { + okerr = "ERR" + } + return fmt.Sprintf(" = %s (%d)", okerr, job.status) +} + +// String returns a human-readable description of `job` +func (job *Job) String() string { + return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) +} + +func (job *Job) Getenv(key string) (value string) { + return job.env.Get(key) +} + +func (job *Job) GetenvBool(key string) (value bool) { + return job.env.GetBool(key) +} + +func (job *Job) SetenvBool(key string, value bool) { + job.env.SetBool(key, value) +} + +func (job *Job) GetenvInt64(key string) int64 { + return job.env.GetInt64(key) +} + +func (job *Job) GetenvInt(key string) int { + return job.env.GetInt(key) +} + +func (job *Job) SetenvInt64(key string, value int64) { + job.env.SetInt64(key, value) +} + +func (job *Job) SetenvInt(key string, value int) { + job.env.SetInt(key, value) +} + +// Returns nil if key not found +func (job *Job) GetenvList(key string) []string { + return job.env.GetList(key) +} + +func (job *Job) GetenvJson(key string, iface interface{}) error { + return job.env.GetJson(key, iface) +} + +func (job *Job) SetenvJson(key string, value interface{}) error { + return job.env.SetJson(key, value) +} + +func (job *Job) SetenvList(key string, value []string) error { + return job.env.SetJson(key, value) +} + +func (job *Job) Setenv(key, value string) { + job.env.Set(key, value) +} + +// DecodeEnv decodes `src` as a json dictionary, and adds +// each decoded key-value pair to the environment. +// +// If `src` cannot be decoded as a json dictionary, an error +// is returned. +func (job *Job) DecodeEnv(src io.Reader) error { + return job.env.Decode(src) +} + +func (job *Job) EncodeEnv(dst io.Writer) error { + return job.env.Encode(dst) +} + +func (job *Job) ExportEnv(dst interface{}) (err error) { + return job.env.Export(dst) +} + +func (job *Job) ImportEnv(src interface{}) (err error) { + return job.env.Import(src) +} + +func (job *Job) Environ() map[string]string { + return job.env.Map() +} + +func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { + prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) + return fmt.Fprintf(job.Stderr, prefixedFormat, args...) +} + +func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { + return fmt.Fprintf(job.Stdout, format, args...) 
+} + +func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) { + return fmt.Fprintf(job.Stderr, format, args...) +} + +func (job *Job) Error(err error) (int, error) { + return fmt.Fprintf(job.Stderr, "%s", err) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job_test.go new file mode 100644 index 00000000..c58ee4b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/job_test.go @@ -0,0 +1,84 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package engine + +import ( + "os" + "testing" +) + +func TestJobStatusOK(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + eng.Register("return_ok", func(job *Job) Status { return StatusOK }) + err := eng.Job("return_ok").Run() + if err != nil { + t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) + } +} + +func TestJobStatusErr(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + eng.Register("return_err", func(job *Job) Status { return StatusErr }) + err := eng.Job("return_err").Run() + if err == nil { + t.Fatalf("When a job returns StatusErr, Run() should return an error") + } +} + +func TestJobStatusNotFound(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) + err := eng.Job("return_not_found").Run() + if err == nil { + t.Fatalf("When a job returns StatusNotFound, Run() should return an error") + } +} + +func TestJobStdoutString(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + // FIXME: test multiple combinations of output and status + eng.Register("say_something_in_stdout", func(job *Job) Status { + job.Printf("Hello world\n") + return StatusOK + }) + + job := eng.Job("say_something_in_stdout") + var output string + if err := job.Stdout.AddString(&output); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + if expectedOutput := "Hello world"; output != expectedOutput { + t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) + } +} + +func TestJobStderrString(t *testing.T) { + eng := newTestEngine(t) + defer os.RemoveAll(eng.Root()) + // FIXME: test multiple combinations of output and status + eng.Register("say_something_in_stderr", func(job *Job) Status { + job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") + return StatusOK + }) + + job := eng.Job("say_something_in_stderr") + var output string + if err := job.Stderr.AddString(&output); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + if expectedOutput := "Something happened"; output != expectedOutput { + t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams.go new file mode 100644 index 00000000..2dcfe23d --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams.go @@ -0,0 +1,196 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
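// Editor's note: an illustrative sketch only, not part of the vendored job.go,
// job_test.go, or any upstream file. It ties together the Engine, Job, Status
// and Output types added in this patch: register a handler, run it as a job,
// and capture its last stdout line via Output.AddString. The import path
// assumes the Godeps workspace layout; the root directory is an arbitrary
// temp dir, and engine.New refuses to run on non-amd64 platforms.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/engine"
)

func main() {
	root, err := ioutil.TempDir("", "engine-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	eng, err := engine.New(root)
	if err != nil {
		log.Fatal(err)
	}

	// A handler receives the job and reports a Status; StatusOK (0) means success.
	eng.Register("greet", func(job *engine.Job) engine.Status {
		job.Printf("hello %s\n", job.Getenv("name"))
		return engine.StatusOK
	})

	job := eng.Job("greet")
	job.Setenv("name", "world")

	var lastLine string
	if err := job.Stdout.AddString(&lastLine); err != nil {
		log.Fatal(err)
	}
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("job said:", lastLine) // "hello world"
}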
+ +package engine + +import ( + "bufio" + "container/ring" + "fmt" + "io" + "sync" +) + +type Output struct { + sync.Mutex + dests []io.Writer + tasks sync.WaitGroup +} + +// NewOutput returns a new Output object with no destinations attached. +// Writing to an empty Output will cause the written data to be discarded. +func NewOutput() *Output { + return &Output{} +} + +// Add attaches a new destination to the Output. Any data subsequently written +// to the output will be written to the new destination in addition to all the others. +// This method is thread-safe. +// FIXME: Add cannot fail +func (o *Output) Add(dst io.Writer) error { + o.Mutex.Lock() + defer o.Mutex.Unlock() + o.dests = append(o.dests, dst) + return nil +} + +// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination, +// and returns its reading end for consumption by the caller. +// This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package. +// This method is thread-safe. +func (o *Output) AddPipe() (io.Reader, error) { + r, w := io.Pipe() + o.Add(w) + return r, nil +} + +// AddTail starts a new goroutine which will read all subsequent data written to the output, +// line by line, and append the last `n` lines to `dst`. +func (o *Output) AddTail(dst *[]string, n int) error { + src, err := o.AddPipe() + if err != nil { + return err + } + o.tasks.Add(1) + go func() { + defer o.tasks.Done() + Tail(src, n, dst) + }() + return nil +} + +// AddString starts a new goroutine which will read all subsequent data written to the output, +// line by line, and store the last line into `dst`. +func (o *Output) AddString(dst *string) error { + src, err := o.AddPipe() + if err != nil { + return err + } + o.tasks.Add(1) + go func() { + defer o.tasks.Done() + lines := make([]string, 0, 1) + Tail(src, 1, &lines) + if len(lines) == 0 { + *dst = "" + } else { + *dst = lines[0] + } + }() + return nil +} + +// Write writes the same data to all registered destinations. +// This method is thread-safe. +func (o *Output) Write(p []byte) (n int, err error) { + o.Mutex.Lock() + defer o.Mutex.Unlock() + var firstErr error + for _, dst := range o.dests { + _, err := dst.Write(p) + if err != nil && firstErr == nil { + firstErr = err + } + } + return len(p), firstErr +} + +// Close unregisters all destinations and waits for all background +// AddTail and AddString tasks to complete. +// The Close method of each destination is called if it exists. +func (o *Output) Close() error { + o.Mutex.Lock() + defer o.Mutex.Unlock() + var firstErr error + for _, dst := range o.dests { + if closer, ok := dst.(io.WriteCloser); ok { + err := closer.Close() + if err != nil && firstErr == nil { + firstErr = err + } + } + } + o.tasks.Wait() + return firstErr +} + +type Input struct { + src io.Reader + sync.Mutex +} + +// NewInput returns a new Input object with no source attached. +// Reading to an empty Input will return io.EOF. +func NewInput() *Input { + return &Input{} +} + +// Read reads from the input in a thread-safe way. +func (i *Input) Read(p []byte) (n int, err error) { + i.Mutex.Lock() + defer i.Mutex.Unlock() + if i.src == nil { + return 0, io.EOF + } + return i.src.Read(p) +} + +// Add attaches a new source to the input. +// Add can only be called once per input. Subsequent calls will +// return an error. 
+func (i *Input) Add(src io.Reader) error { + i.Mutex.Lock() + defer i.Mutex.Unlock() + if i.src != nil { + return fmt.Errorf("Maximum number of sources reached: 1") + } + i.src = src + return nil +} + +// Tail reads from `src` line per line, and returns the last `n` lines as an array. +// A ring buffer is used to only store `n` lines at any time. +func Tail(src io.Reader, n int, dst *[]string) { + scanner := bufio.NewScanner(src) + r := ring.New(n) + for scanner.Scan() { + if n == 0 { + continue + } + r.Value = scanner.Text() + r = r.Next() + } + r.Do(func(v interface{}) { + if v == nil { + return + } + *dst = append(*dst, v.(string)) + }) +} + +// AddEnv starts a new goroutine which will decode all subsequent data +// as a stream of json-encoded objects, and point `dst` to the last +// decoded object. +// The result `env` can be queried using the type-neutral Env interface. +// It is not safe to query `env` until the Output is closed. +func (o *Output) AddEnv() (dst *Env, err error) { + src, err := o.AddPipe() + if err != nil { + return nil, err + } + dst = &Env{} + o.tasks.Add(1) + go func() { + defer o.tasks.Done() + decoder := NewDecoder(src) + for { + env, err := decoder.Decode() + if err != nil { + return + } + *dst = *env + } + }() + return dst, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams_test.go new file mode 100644 index 00000000..177fb21e --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/engine/streams_test.go @@ -0,0 +1,298 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package engine + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestOutputAddString(t *testing.T) { + var testInputs = [][2]string{ + { + "hello, world!", + "hello, world!", + }, + + { + "One\nTwo\nThree", + "Three", + }, + + { + "", + "", + }, + + { + "A line\nThen another nl-terminated line\n", + "Then another nl-terminated line", + }, + + { + "A line followed by an empty line\n\n", + "", + }, + } + for _, testData := range testInputs { + input := testData[0] + expectedOutput := testData[1] + o := NewOutput() + var output string + if err := o.AddString(&output); err != nil { + t.Error(err) + } + if n, err := o.Write([]byte(input)); err != nil { + t.Error(err) + } else if n != len(input) { + t.Errorf("Expected %d, got %d", len(input), n) + } + o.Close() + if output != expectedOutput { + t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output) + } + } +} + +type sentinelWriteCloser struct { + calledWrite bool + calledClose bool +} + +func (w *sentinelWriteCloser) Write(p []byte) (int, error) { + w.calledWrite = true + return len(p), nil +} + +func (w *sentinelWriteCloser) Close() error { + w.calledClose = true + return nil +} + +func TestOutputAddEnv(t *testing.T) { + input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" + o := NewOutput() + result, err := o.AddEnv() + if err != nil { + t.Fatal(err) + } + o.Write([]byte(input)) + o.Close() + if v := result.Get("foo"); v != "bar" { + t.Errorf("Expected %v, got %v", "bar", v) + } + if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { + t.Errorf("Expected %v, got %v", 42, v) + } + if v := result.Get("this-value-doesnt-exist"); v != "" { + t.Errorf("Expected %v, got %v", "", v) + } +} + +func TestOutputAddClose(t *testing.T) { + o := NewOutput() + var s sentinelWriteCloser + if err := o.Add(&s); err != nil { + t.Fatal(err) + } + if err := o.Close(); err != nil { + t.Fatal(err) + } + // Write data after the output is closed. + // Write should succeed, but no destination should receive it. 
+ if _, err := o.Write([]byte("foo bar")); err != nil { + t.Fatal(err) + } + if !s.calledClose { + t.Fatal("Output.Close() didn't close the destination") + } +} + +func TestOutputAddPipe(t *testing.T) { + var testInputs = []string{ + "hello, world!", + "One\nTwo\nThree", + "", + "A line\nThen another nl-terminated line\n", + "A line followed by an empty line\n\n", + } + for _, input := range testInputs { + expectedOutput := input + o := NewOutput() + r, err := o.AddPipe() + if err != nil { + t.Fatal(err) + } + go func(o *Output) { + if n, err := o.Write([]byte(input)); err != nil { + t.Error(err) + } else if n != len(input) { + t.Errorf("Expected %d, got %d", len(input), n) + } + if err := o.Close(); err != nil { + t.Error(err) + } + }(o) + output, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(output) != expectedOutput { + t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) + } + } +} + +func TestTail(t *testing.T) { + var tests = make(map[string][][]string) + tests["hello, world!"] = [][]string{ + {}, + {"hello, world!"}, + {"hello, world!"}, + {"hello, world!"}, + } + tests["One\nTwo\nThree"] = [][]string{ + {}, + {"Three"}, + {"Two", "Three"}, + {"One", "Two", "Three"}, + } + for input, outputs := range tests { + for n, expectedOutput := range outputs { + var output []string + Tail(strings.NewReader(input), n, &output) + if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) + } + } + } +} + +func TestOutputAddTail(t *testing.T) { + var tests = make(map[string][][]string) + tests["hello, world!"] = [][]string{ + {}, + {"hello, world!"}, + {"hello, world!"}, + {"hello, world!"}, + } + tests["One\nTwo\nThree"] = [][]string{ + {}, + {"Three"}, + {"Two", "Three"}, + {"One", "Two", "Three"}, + } + for input, outputs := range tests { + for n, expectedOutput := range outputs { + o := NewOutput() + var output []string + if err := o.AddTail(&output, n); err != nil { + t.Error(err) + } + if n, err := o.Write([]byte(input)); err != nil { + t.Error(err) + } else if n != len(input) { + t.Errorf("Expected %d, got %d", len(input), n) + } + o.Close() + if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { + t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output) + } + } + } +} + +func lastLine(txt string) string { + scanner := bufio.NewScanner(strings.NewReader(txt)) + var lastLine string + for scanner.Scan() { + lastLine = scanner.Text() + } + return lastLine +} + +func TestOutputAdd(t *testing.T) { + o := NewOutput() + b := &bytes.Buffer{} + o.Add(b) + input := "hello, world!" 
+ if n, err := o.Write([]byte(input)); err != nil { + t.Fatal(err) + } else if n != len(input) { + t.Fatalf("Expected %d, got %d", len(input), n) + } + if output := b.String(); output != input { + t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) + } +} + +func TestOutputWriteError(t *testing.T) { + o := NewOutput() + buf := &bytes.Buffer{} + o.Add(buf) + r, w := io.Pipe() + input := "Hello there" + expectedErr := fmt.Errorf("This is an error") + r.CloseWithError(expectedErr) + o.Add(w) + n, err := o.Write([]byte(input)) + if err != expectedErr { + t.Fatalf("Output.Write() should return the first error encountered, if any") + } + if buf.String() != input { + t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error") + } + if n != len(input) { + t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination") + } +} + +func TestInputAddEmpty(t *testing.T) { + i := NewInput() + var b bytes.Buffer + if err := i.Add(&b); err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(i) + if err != nil { + t.Fatal(err) + } + if len(data) > 0 { + t.Fatalf("Read from empty input shoul yield no data") + } +} + +func TestInputAddTwo(t *testing.T) { + i := NewInput() + var b1 bytes.Buffer + // First add should succeed + if err := i.Add(&b1); err != nil { + t.Fatal(err) + } + var b2 bytes.Buffer + // Second add should fail + if err := i.Add(&b2); err == nil { + t.Fatalf("Adding a second source should return an error") + } +} + +func TestInputAddNotEmpty(t *testing.T) { + i := NewInput() + b := bytes.NewBufferString("hello world\nabc") + expectedResult := b.String() + i.Add(b) + result, err := ioutil.ReadAll(i) + if err != nil { + t.Fatal(err) + } + if string(result) != expectedResult { + t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go new file mode 100644 index 00000000..1808e416 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go @@ -0,0 +1,278 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/http/httputil" + "sync" + "sync/atomic" + "time" +) + +// APIEvents represents an event returned by the API. +type APIEvents struct { + Status string + ID string + From string + Time int64 +} + +type eventMonitoringState struct { + sync.RWMutex + sync.WaitGroup + enabled bool + lastSeen *int64 + C chan *APIEvents + errC chan error + listeners []chan<- *APIEvents +} + +const ( + maxMonitorConnRetries = 5 + retryInitialWaitTime = 10. +) + +var ( + // ErrNoListeners is the error returned when no listeners are available + // to receive an event. + ErrNoListeners = errors.New("no listeners present to receive event") + + // ErrListenerAlreadyExists is the error returned when the listerner already + // exists. + ErrListenerAlreadyExists = errors.New("listener already exists for docker events") +) + +// AddEventListener adds a new listener to container events in the Docker API. +// +// The parameter is a channel through which events will be sent. 
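+//
+// Illustrative sketch only (hypothetical names; assumes a *Client built with
+// NewClient and an import of "log"):
+//
+//	listener := make(chan *APIEvents, 10)
+//	if err := client.AddEventListener(listener); err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.RemoveEventListener(listener)
+//	for event := range listener {
+//		log.Printf("%s %s (from %s)", event.Status, event.ID, event.From)
+//	}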
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error { + var err error + if !c.eventMonitor.isEnabled() { + err = c.eventMonitor.enableEventMonitoring(c) + if err != nil { + return err + } + } + err = c.eventMonitor.addListener(listener) + if err != nil { + return err + } + return nil +} + +// RemoveEventListener removes a listener from the monitor. +func (c *Client) RemoveEventListener(listener chan *APIEvents) error { + err := c.eventMonitor.removeListener(listener) + if err != nil { + return err + } + if len(c.eventMonitor.listeners) == 0 { + err = c.eventMonitor.disableEventMonitoring() + if err != nil { + return err + } + } + return nil +} + +func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + return ErrListenerAlreadyExists + } + eventState.Add(1) + eventState.listeners = append(eventState.listeners, listener) + return nil +} + +func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { + eventState.Lock() + defer eventState.Unlock() + if listenerExists(listener, &eventState.listeners) { + var newListeners []chan<- *APIEvents + for _, l := range eventState.listeners { + if l != listener { + newListeners = append(newListeners, l) + } + } + eventState.listeners = newListeners + eventState.Add(-1) + } + return nil +} + +func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { + for _, b := range *list { + if b == a { + return true + } + } + return false +} + +func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { + eventState.Lock() + defer eventState.Unlock() + if !eventState.enabled { + eventState.enabled = true + var lastSeenDefault = int64(0) + eventState.lastSeen = &lastSeenDefault + eventState.C = make(chan *APIEvents, 100) + eventState.errC = make(chan error, 1) + go eventState.monitorEvents(c) + } + return nil +} + +func (eventState *eventMonitoringState) disableEventMonitoring() error { + eventState.Wait() + eventState.Lock() + defer eventState.Unlock() + if eventState.enabled { + eventState.enabled = false + close(eventState.C) + close(eventState.errC) + } + return nil +} + +func (eventState *eventMonitoringState) monitorEvents(c *Client) { + var err error + for eventState.noListeners() { + time.Sleep(10 * time.Millisecond) + } + if err = eventState.connectWithRetry(c); err != nil { + eventState.terminate(err) + } + for eventState.isEnabled() { + timeout := time.After(100 * time.Millisecond) + select { + case ev, ok := <-eventState.C: + if !ok { + return + } + go eventState.sendEvent(ev) + go eventState.updateLastSeen(ev) + case err = <-eventState.errC: + if err == ErrNoListeners { + eventState.terminate(nil) + return + } else if err != nil { + defer func() { go eventState.monitorEvents(c) }() + return + } + case <-timeout: + continue + } + } +} + +func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { + var retries int + var err error + for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { + waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) + } + return err +} + +func (eventState *eventMonitoringState) noListeners() bool { + eventState.RLock() + defer 
eventState.RUnlock() + return len(eventState.listeners) == 0 +} + +func (eventState *eventMonitoringState) isEnabled() bool { + eventState.RLock() + defer eventState.RUnlock() + return eventState.enabled +} + +func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { + eventState.RLock() + defer eventState.RUnlock() + eventState.Add(1) + defer eventState.Done() + if eventState.isEnabled() { + if eventState.noListeners() { + eventState.errC <- ErrNoListeners + return + } + + for _, listener := range eventState.listeners { + listener <- event + } + } +} + +func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { + eventState.Lock() + defer eventState.Unlock() + if atomic.LoadInt64(eventState.lastSeen) < e.Time { + atomic.StoreInt64(eventState.lastSeen, e.Time) + } +} + +func (eventState *eventMonitoringState) terminate(err error) { + eventState.disableEventMonitoring() +} + +func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { + uri := "/events" + if startTime != 0 { + uri += fmt.Sprintf("?since=%d", startTime) + } + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" { + protocol = "tcp" + address = c.endpointURL.Host + } + dial, err := net.Dial(protocol, address) + if err != nil { + return err + } + conn := httputil.NewClientConn(dial, nil) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := conn.Do(req) + if err != nil { + return err + } + go func(res *http.Response, conn *httputil.ClientConn) { + defer conn.Close() + defer res.Body.Close() + decoder := json.NewDecoder(res.Body) + for { + var event APIEvents + if err = decoder.Decode(&event); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } + errChan <- err + } + if event.Time == 0 { + continue + } + if !c.eventMonitor.isEnabled() { + return + } + c.eventMonitor.C <- &event + } + }(res, conn) + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go new file mode 100644 index 00000000..cb54f4ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go @@ -0,0 +1,93 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "bufio" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestEventListeners(t *testing.T) { + response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} +{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} +{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} +` + + var req http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rsc := bufio.NewScanner(strings.NewReader(response)) + for rsc.Scan() { + w.Write([]byte(rsc.Text())) + w.(http.Flusher).Flush() + time.Sleep(10 * time.Millisecond) + } + req = *r + })) + defer server.Close() + + client, err := NewClient(server.URL) + if err != nil { + t.Errorf("Failed to create client: %s", err) + } + client.SkipServerVersionCheck = true + + listener := make(chan *APIEvents, 10) + defer func() { time.Sleep(10 * time.Millisecond); client.RemoveEventListener(listener) }() + + err = client.AddEventListener(listener) + if err != nil { + t.Errorf("Failed to add event listener: %s", err) + } + + timeout := time.After(1 * time.Second) + var count int + + for { + select { + case msg := <-listener: + t.Logf("Recieved: %s", *msg) + count++ + err = checkEvent(count, msg) + if err != nil { + t.Fatalf("Check event failed: %s", err) + } + if count == 4 { + return + } + case <-timeout: + t.Fatal("TestAddEventListener timed out waiting on events") + } + } +} + +func checkEvent(index int, event *APIEvents) error { + if event.ID != "dfdf82bd3881" { + return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID) + } + if event.From != "base:latest" { + return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From) + } + var status string + switch index { + case 1: + status = "create" + case 2: + status = "start" + case 3: + status = "stop" + case 4: + status = "destroy" + } + if event.Status != status { + return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go new file mode 100644 index 00000000..ba44dd8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker_test + +import ( + "archive/tar" + "bytes" + "io" + "log" + "time" + + "github.com/fsouza/go-dockerclient" +) + +func ExampleClient_AttachToContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + client.SkipServerVersionCheck = true + // Reading logs from container a84849 and sending them to buf. + var buf bytes.Buffer + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Logs: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) + // Attaching to stdout and streaming. 
+ buf.Reset() + err = client.AttachToContainer(docker.AttachToContainerOptions{ + Container: "a84849", + OutputStream: &buf, + Stdout: true, + Stream: true, + }) + if err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_CopyFromContainer() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + cid := "a84849" + // Copy resulting file + var buf bytes.Buffer + filename := "/tmp/output.txt" + err = client.CopyFromContainer(docker.CopyFromContainerOptions{ + Container: cid, + Resource: filename, + OutputStream: &buf, + }) + if err != nil { + log.Fatalf("Error while copying from %s: %s\n", cid, err) + } + content := new(bytes.Buffer) + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + tr.Next() + if err != nil && err != io.EOF { + log.Fatal(err) + } + if _, err := io.Copy(content, tr); err != nil { + log.Fatal(err) + } + log.Println(buf.String()) +} + +func ExampleClient_BuildImage() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + t := time.Now() + inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + tr := tar.NewWriter(inputbuf) + tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) + tr.Write([]byte("FROM base\n")) + tr.Close() + opts := docker.BuildImageOptions{ + Name: "test", + InputStream: inputbuf, + OutputStream: outputbuf, + } + if err := client.BuildImage(opts); err != nil { + log.Fatal(err) + } +} + +func ExampleClient_ListenEvents() { + client, err := docker.NewClient("http://localhost:4243") + if err != nil { + log.Fatal(err) + } + + listener := make(chan *docker.APIEvents) + err = client.AddEventListener(listener) + if err != nil { + log.Fatal(err) + } + + defer func() { + + err = client.RemoveEventListener(listener) + if err != nil { + log.Fatal(err) + } + + }() + + timeout := time.After(1 * time.Second) + + for { + select { + case msg := <-listener: + log.Println(msg) + case <-timeout: + break + } + } + +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go new file mode 100644 index 00000000..4280d135 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go @@ -0,0 +1,322 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" +) + +// APIImages represent an image returned in the ListImages call. 
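+//
+// Illustrative sketch only (hypothetical names; assumes a *Client and an
+// import of "fmt"):
+//
+//	images, err := client.ListImages(true)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, img := range images {
+//		fmt.Println(img.ID, img.RepoTags, img.VirtualSize)
+//	}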
+type APIImages struct { + ID string `json:"Id"` + RepoTags []string `json:",omitempty"` + Created int64 + Size int64 + VirtualSize int64 + ParentId string `json:",omitempty"` + Repository string `json:",omitempty"` + Tag string `json:",omitempty"` +} + +type Image struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"containerconfig,omitempty"` + DockerVersion string `json:"dockerversion,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 +} + +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 +} + +var ( + // ErrNoSuchImage is the error returned when the image does not exist. + ErrNoSuchImage = errors.New("no such image") + + // ErrMissingRepo is the error returned when the remote repository is + // missing. + ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") + + // ErrMissingOutputStream is the error returned when no output stream + // is provided to some calls, like BuildImage. + ErrMissingOutputStream = errors.New("missing output stream") +) + +// ListImages returns the list of available images in the server. +// +// See http://goo.gl/dkMrwP for more details. +func (c *Client) ListImages(all bool) ([]APIImages, error) { + path := "/images/json?all=" + if all { + path += "1" + } else { + path += "0" + } + body, _, err := c.do("GET", path, nil) + if err != nil { + return nil, err + } + var images []APIImages + err = json.Unmarshal(body, &images) + if err != nil { + return nil, err + } + return images, nil +} + +// RemoveImage removes an image by its name or ID. +// +// See http://goo.gl/7hjHHy for more details. +func (c *Client) RemoveImage(name string) error { + _, status, err := c.do("DELETE", "/images/"+name, nil) + if status == http.StatusNotFound { + return ErrNoSuchImage + } + return err +} + +// InspectImage returns an image by its name or ID. +// +// See http://goo.gl/pHEbma for more details. 
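+//
+// Illustrative sketch only (hypothetical image name; assumes a *Client and an
+// import of "fmt"):
+//
+//	image, err := client.InspectImage("base:latest")
+//	switch {
+//	case err == ErrNoSuchImage:
+//		// the image is not known to the daemon
+//	case err != nil:
+//		// transport or API error
+//	default:
+//		fmt.Println(image.ID, image.Created, image.Size)
+//	}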
+func (c *Client) InspectImage(name string) (*Image, error) { + body, status, err := c.do("GET", "/images/"+name+"/json", nil) + if status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + if err != nil { + return nil, err + } + + var image Image + + // if the caller elected to skip checking the server's version, assume it's the latest + if c.SkipServerVersionCheck || c.expectedApiVersion.GreaterThanOrEqualTo(apiVersion_1_12) { + err = json.Unmarshal(body, &image) + if err != nil { + return nil, err + } + } else { + var imagePre012 ImagePre012 + err = json.Unmarshal(body, &imagePre012) + if err != nil { + return nil, err + } + + image.ID = imagePre012.ID + image.Parent = imagePre012.Parent + image.Comment = imagePre012.Comment + image.Created = imagePre012.Created + image.Container = imagePre012.Container + image.ContainerConfig = imagePre012.ContainerConfig + image.DockerVersion = imagePre012.DockerVersion + image.Author = imagePre012.Author + image.Config = imagePre012.Config + image.Architecture = imagePre012.Architecture + image.Size = imagePre012.Size + } + + return &image, nil +} + +// PushImageOptions represents options to use in the PushImage method. +// +// See http://goo.gl/GBmyhc for more details. +type PushImageOptions struct { + // Name of the image + Name string + + // Tag of the image + Tag string + + // Registry server to push the image + Registry string + + OutputStream io.Writer `qs:"-"` +} + +// AuthConfiguration represents authentication options to use in the PushImage +// method. It represents the authencation in the Docker index server. +type AuthConfiguration struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` +} + +// PushImage pushes an image to a remote registry, logging progress to w. +// +// An empty instance of AuthConfiguration may be used for unauthenticated +// pushes. +// +// See http://goo.gl/GBmyhc for more details. +func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { + if opts.Name == "" { + return ErrNoSuchImage + } + name := opts.Name + opts.Name = "" + path := "/images/" + name + "/push?" + queryString(&opts) + var headers = make(map[string]string) + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(auth) + + headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + + return c.stream("POST", path, true, headers, nil, opts.OutputStream, nil) +} + +// PullImageOptions present the set of options available for pulling an image +// from a registry. +// +// See http://goo.gl/PhBKnS for more details. +type PullImageOptions struct { + Repository string `qs:"fromImage"` + Registry string + Tag string + OutputStream io.Writer `qs:"-"` +} + +// PullImage pulls an image from a remote registry, logging progress to w. +// +// See http://goo.gl/PhBKnS for more details. +func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + + var headers = make(map[string]string) + var buf bytes.Buffer + json.NewEncoder(&buf).Encode(auth) + headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + + return c.createImage(queryString(&opts), headers, nil, opts.OutputStream) +} + +func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer) error { + path := "/images/create?" 
+ qs + return c.stream("POST", path, true, headers, in, w, nil) +} + +// ImportImageOptions present the set of informations available for importing +// an image from a source file or the stdin. +// +// See http://goo.gl/PhBKnS for more details. +type ImportImageOptions struct { + Repository string `qs:"repo"` + Source string `qs:"fromSrc"` + Tag string `qs:"tag"` + + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` +} + +// ImportImage imports an image from a url, a file or stdin +// +// See http://goo.gl/PhBKnS for more details. +func (c *Client) ImportImage(opts ImportImageOptions) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + if opts.Source != "-" { + opts.InputStream = nil + } + if opts.Source != "-" && !isURL(opts.Source) { + f, err := os.Open(opts.Source) + if err != nil { + return err + } + b, err := ioutil.ReadAll(f) + opts.InputStream = bytes.NewBuffer(b) + opts.Source = "-" + } + return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream) +} + +// BuildImageOptions present the set of informations available for building +// an image from a tarfile with a Dockerfile in it,the details about Dockerfile +// see http://docs.docker.io/en/latest/reference/builder/ +type BuildImageOptions struct { + Name string `qs:"t"` + NoCache bool `qs:"nocache"` + SuppressOutput bool `qs:"q"` + RmTmpContainer bool `qs:"rm"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + Remote string `qs:"remote"` +} + +// BuildImage builds an image from a tarball's url or a Dockerfile in the input +// stream. +func (c *Client) BuildImage(opts BuildImageOptions) error { + if opts.OutputStream == nil { + return ErrMissingOutputStream + } + var headers map[string]string + if opts.Remote != "" && opts.Name == "" { + opts.Name = opts.Remote + } + if opts.InputStream != nil { + headers = map[string]string{"Content-Type": "application/tar"} + } else if opts.Remote == "" { + return ErrMissingRepo + } + return c.stream("POST", fmt.Sprintf("/build?%s", + queryString(&opts)), true, headers, opts.InputStream, opts.OutputStream, nil) +} + +// TagImageOptions present the set of options to tag an image +type TagImageOptions struct { + Repo string + Tag string + Force bool +} + +// TagImage adds a tag to the image 'name' +func (c *Client) TagImage(name string, opts TagImageOptions) error { + if name == "" { + return ErrNoSuchImage + } + _, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s", + queryString(&opts)), nil) + if status == http.StatusNotFound { + return ErrNoSuchImage + } + + return err +} + +func isURL(u string) bool { + p, err := url.Parse(u) + if err != nil { + return false + } + return p.Scheme == "http" || p.Scheme == "https" +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go new file mode 100644 index 00000000..a2db84db --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go @@ -0,0 +1,642 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" +) + +func newTestClient(rt *FakeRoundTripper) Client { + endpoint := "http://localhost:4243" + u, _ := parseEndpoint("http://localhost:4243") + client := Client{ + endpoint: endpoint, + endpointURL: u, + client: &http.Client{Transport: rt}, + SkipServerVersionCheck: true, + } + return client +} + +type stdoutMock struct { + *bytes.Buffer +} + +func (m stdoutMock) Close() error { + return nil +} + +type stdinMock struct { + *bytes.Buffer +} + +func (m stdinMock) Close() error { + return nil +} + +func TestListImages(t *testing.T) { + body := `[ + { + "Repository":"base", + "Tag":"ubuntu-12.10", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "Repository":"base", + "Tag":"ubuntu-quantal", + "Id":"b750fe79269d", + "Created":1364102658 + }, + { + "RepoTag": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTag": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2e", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } +]` + var expected []APIImages + err := json.Unmarshal([]byte(body), &expected) + if err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + images, err := client.ListImages(false) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(images, expected) { + t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) + } +} + +func TestListImagesParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} + client := newTestClient(fakeRT) + _, err := client.ListImages(false) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("ListImages(false: Wrong HTTP method. Want GET. Got %s.", req.Method) + } + if all := req.URL.Query().Get("all"); all != "0" { + t.Errorf("ListImages(false): Wrong parameter. Want all=0. Got all=%s", all) + } + fakeRT.Reset() + _, err = client.ListImages(true) + if err != nil { + t.Fatal(err) + } + req = fakeRT.requests[0] + if all := req.URL.Query().Get("all"); all != "1" { + t.Errorf("ListImages(true): Wrong parameter. Want all=1. Got all=%s", all) + } +} + +func TestRemoveImage(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + err := client.RemoveImage(name) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + err := client.RemoveImage("test:") + if err != ErrNoSuchImage { + t.Errorf("RemoveImage: wrong error. Want %#v. 
Got %#v.", ErrNoSuchImage, err) + } +} + +func TestInspectImage(t *testing.T) { + body := `{ + "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "parent":"27cf784147099545", + "created":"2013-03-23T22:24:18.818426-07:00", + "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "container_config":{"Memory":0} +}` + var expected Image + json.Unmarshal([]byte(body), &expected) + fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(fakeRT) + image, err := client.InspectImage(expected.ID) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(*image, expected) { + t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) + } + u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) + if req.URL.Path != u.Path { + t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) + } +} + +func TestInspectImageNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) + name := "test" + image, err := client.InspectImage(name) + if image != nil { + t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) + } + if err != ErrNoSuchImage { + t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err) + } +} + +func TestPushImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pushing 1/100" + if buf.String() != expected { + t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/test/push")) + if req.URL.Path != u.Path { + t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + if query := req.URL.Query().Encode(); query != "" { + t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) + } + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) + } + if strings.TrimSpace(string(auth)) != "{}" { + t.Errorf("PushImage: wrong body. Want %q. Got %q.", + base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) + } +} + +func TestPushImageWithAuthentication(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + inputAuth := AuthConfiguration{ + Username: "gopher", + Password: "gopher123", + Email: "gopher@tsuru.io", + } + err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + var gotAuth AuthConfiguration + + auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) + if err != nil { + t.Errorf("PushImage: caught error decoding auth. 
%#v", err.Error()) + } + + err = json.Unmarshal(auth, &gotAuth) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotAuth, inputAuth) { + t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) + } +} + +func TestPushImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var authConfig AuthConfiguration + var buf bytes.Buffer + opts := PushImageOptions{ + Name: "test", Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PushImage(opts, authConfig) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedQuery := "registry=docker.tsuru.io" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) + } +} + +func TestPushImageNoName(t *testing.T) { + client := Client{} + err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestPullImage(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, + AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + expected := "Pulling 1/100" + if buf.String() != expected { + t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) + } + req := fakeRT.requests[0] + if req.Method != "POST" { + t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/images/create")) + if req.URL.Path != u.Path { + t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } + expectedQuery := "fromImage=base" + if query := req.URL.Query().Encode(); query != expectedQuery { + t.Errorf("PullImage: Wrong query strin. Want %q. Got %q.", expectedQuery, query) + } +} + +func TestPullImageWithoutOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageCustomRegistry(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestPullImageTag(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := PullImageOptions{ + Repository: "base", + Registry: "docker.tsuru.io", + Tag: "latest", + OutputStream: &buf, + } + err := client.PullImage(opts, AuthConfiguration{}) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestPullImageNoRepository(t *testing.T) { + var opts PullImageOptions + client := Client{} + err := client.PullImage(opts, AuthConfiguration{}) + if err != ErrNoSuchImage { + t.Errorf("PullImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) + } +} + +func TestImportImageFromUrl(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "http://mycompany.com/file.tar", + Repository: "testimage", + Tag: "tag", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestImportImageFromInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + in := bytes.NewBufferString("tar content") + var buf bytes.Buffer + opts := ImportImageOptions{ + Source: "-", Repository: "testimage", + InputStream: in, OutputStream: &buf, + Tag: "tag", + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + e := "tar content" + if string(body) != e { + t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) + } +} + +func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + in := bytes.NewBufferString("foo") + opts := ImportImageOptions{ + Source: "http://test.com/container.tar", Repository: "testimage", + InputStream: in, OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) + } + if string(body) != "" { + t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body)) + } +} + +func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + tar, err := os.Open(tarPath) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + tarContent, err := ioutil.ReadAll(tar) + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tarContent, body) { + t.Errorf("ImportImage: wrong body. Want %#v content. Got %#v.", tarPath, body) + } +} + +func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + tarPath := "testing/data/container.tar" + opts := ImportImageOptions{ + Source: tarPath, Repository: "testimage", + OutputStream: &buf, + } + err := client.ImportImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + NoCache: true, + SuppressOutput: true, + RmTmpContainer: true, + InputStream: &buf, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil && strings.Index(err.Error(), "build image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Name}, "nocache": {"1"}, "q": {"1"}, "rm": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestBuildImageParametersForRemoteBuild(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("ImportImage: wrong query string. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestBuildImageMissingRepoAndNilInput(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Name: "testImage", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != ErrMissingRepo { + t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) + } +} + +func TestBuildImageMissingOutputStream(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := BuildImageOptions{Name: "testImage"} + err := client.BuildImage(opts) + if err != ErrMissingOutputStream { + t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingOutputStream, err) + } +} + +func TestBuildImageRemoteWithoutName(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + var buf bytes.Buffer + opts := BuildImageOptions{ + Remote: "testing/data/container.tar", + SuppressOutput: true, + OutputStream: &buf, + } + err := client.BuildImage(opts) + if err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} + got := map[string][]string(req.URL.Query()) + if !reflect.DeepEqual(got, expected) { + t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageParameters(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("base", opts) + if err != nil && strings.Index(err.Error(), "tag image fail") == -1 { + t.Fatal(err) + } + req := fakeRT.requests[0] + expected := "http://localhost:4243/images/base/tag?repo=testImage" + got := req.URL.String() + if !reflect.DeepEqual(got, expected) { + t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) + } +} + +func TestTagImageMissingRepo(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} + client := newTestClient(fakeRT) + opts := TagImageOptions{Repo: "testImage"} + err := client.TagImage("", opts) + if err != ErrNoSuchImage { + t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", + ErrNoSuchImage, err) + } +} + +func TestIsUrl(t *testing.T) { + url := "http://foo.bar/" + result := isURL(url) + if !result { + t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) + } + url = "/foo/bar.tar" + result = isURL(url) + if result { + t.Errorf("isURL: wrong match. Expected %#v to not be a url. Got %#v", url, result) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go new file mode 100644 index 00000000..1b9267e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go @@ -0,0 +1,47 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "io" + + "github.com/fsouza/go-dockerclient/engine" +) + +// Version returns version information about the docker server. +// +// See http://goo.gl/IqKNRE for more details. 
+func (c *Client) Version() (*engine.Env, error) { + body, _, err := c.do("GET", "/version", nil) + if err != nil { + return nil, err + } + out := engine.NewOutput() + remoteVersion, err := out.AddEnv() + if err != nil { + return nil, err + } + if _, err := io.Copy(out, bytes.NewReader(body)); err != nil { + return nil, err + } + return remoteVersion, nil +} + +// Info returns system-wide information, like the number of running containers. +// +// See http://goo.gl/LOmySw for more details. +func (c *Client) Info() (*engine.Env, error) { + body, _, err := c.do("GET", "/info", nil) + if err != nil { + return nil, err + } + var info engine.Env + err = info.Decode(bytes.NewReader(body)) + if err != nil { + return nil, err + } + return &info, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go new file mode 100644 index 00000000..8cf283e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go @@ -0,0 +1,123 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "net/http" + "net/url" + "reflect" + "sort" + "testing" + + "github.com/fsouza/go-dockerclient/engine" +) + +type DockerVersion struct { + Version string + GitCommit string + GoVersion string +} + +func TestVersion(t *testing.T) { + body := `{ + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := DockerVersion{ + Version: "0.2.2", + GitCommit: "5a2a5cc+CHANGES", + GoVersion: "go1.0.3", + } + version, err := client.Version() + if err != nil { + t.Fatal(err) + } + + if result := version.Get("Version"); result != expected.Version { + t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) + } + if result := version.Get("GitCommit"); result != expected.GitCommit { + t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) + } + if result := version.Get("GoVersion"); result != expected.GoVersion { + t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/version")) + if req.URL.Path != u.Path { + t.Errorf("Version(): wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) + } +} + +func TestVersionError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Version() + if version != nil { + t.Errorf("Version(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Version(): unexpected error") + } +} + +func TestInfo(t *testing.T) { + body := `{ + "Containers":11, + "Images":16, + "Debug":0, + "NFd":11, + "NGoroutines":21, + "MemoryLimit":1, + "SwapLimit":0 +}` + fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(&fakeRT) + expected := engine.Env{} + expected.SetInt("Containers", 11) + expected.SetInt("Images", 16) + expected.SetBool("Debug", false) + expected.SetInt("NFd", 11) + expected.SetInt("NGoroutines", 21) + expected.SetBool("MemoryLimit", true) + expected.SetBool("SwapLimit", false) + info, err := client.Info() + if err != nil { + t.Fatal(err) + } + infoSlice := []string(*info) + expectedSlice := []string(expected) + sort.Strings(infoSlice) + sort.Strings(expectedSlice) + if !reflect.DeepEqual(expectedSlice, infoSlice) { + t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info) + } + req := fakeRT.requests[0] + if req.Method != "GET" { + t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) + } + u, _ := url.Parse(client.getURL("/info")) + if req.URL.Path != u.Path { + t.Errorf("Info(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } +} + +func TestInfoError(t *testing.T) { + fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} + client := newTestClient(fakeRT) + version, err := client.Info() + if version != nil { + t.Errorf("Info(): expected value, got %#v.", version) + } + if err == nil { + t.Error("Info(): unexpected error") + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go new file mode 100644 index 00000000..16aa0038 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go @@ -0,0 +1,49 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +// Signal represents a signal that can be send to the container on +// KillContainer call. +type Signal int + +// These values represent all signals available on Linux, where containers will +// be running. 
+const ( + SIGABRT = Signal(0x6) + SIGALRM = Signal(0xe) + SIGBUS = Signal(0x7) + SIGCHLD = Signal(0x11) + SIGCLD = Signal(0x11) + SIGCONT = Signal(0x12) + SIGFPE = Signal(0x8) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x1d) + SIGIOT = Signal(0x6) + SIGKILL = Signal(0x9) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x1d) + SIGPROF = Signal(0x1b) + SIGPWR = Signal(0x1e) + SIGQUIT = Signal(0x3) + SIGSEGV = Signal(0xb) + SIGSTKFLT = Signal(0x10) + SIGSTOP = Signal(0x13) + SIGSYS = Signal(0x1f) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x14) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGUNUSED = Signal(0x1f) + SIGURG = Signal(0x17) + SIGUSR1 = Signal(0xa) + SIGUSR2 = Signal(0xc) + SIGVTALRM = Signal(0x1a) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile new file mode 100644 index 00000000..0948dcfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile @@ -0,0 +1,15 @@ +# this file describes how to build tsuru python image +# to run it: +# 1- install docker +# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile + +from base:ubuntu-quantal +run apt-get install wget -y --force-yes +run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate +run mkdir /var/lib/tsuru +run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 +run cp /var/lib/tsuru/python/deploy /var/lib/tsuru +run cp /var/lib/tsuru/base/restart /var/lib/tsuru +run cp /var/lib/tsuru/base/start /var/lib/tsuru +run /var/lib/tsuru/base/install +run /var/lib/tsuru/base/setup diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar new file mode 100644 index 0000000000000000000000000000000000000000..e4b066e3b6df8cb78ac445a34234f3780d164cf4 GIT binary patch literal 2048 zcmeH_Q3``F42FH)DgF~kTC`qZ7s*`9%A^%r$Bu89Fp<6NMew1akmheFe?H>)Y5N#5 z`(UT)m>?q4G^iwZ#(XmAwH8Ujv`|_rQd)Ig3sQ!(szArs+5bAH%#&Di1HU}iJx_zp z+3uU9k~Zgl)J<3?S%)LS_Hgc7e)t4AX&%Rz>>WAcX2Ec>82D}md=O1Y)p%bo=N_rJ OD+CIGLZA@%gTMmt=q{T8 literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar new file mode 100644 index 0000000000000000000000000000000000000000..32c9ce64704835cd096b85ac44c35b5087b5ccdd GIT binary patch literal 2560 zcmeHGy>8<$49;3V1%d0TNOs}`$a>xT46-c8LTt+?QB8ACf0XPiQll-h0~9$I?_v`_`p)qp;@ z0OJK)JAmosQD=m*-~y?5ASGvD1{zS;L7n!AYz2z}2Y8%Kb25fgK0fDb5l4UE+{yF$ zXs`{{TG^hbn!J);Cl1>2UV0=k!T8hL+GbhfZ2u5L51|SJ2KFb&fyiW3|3Qw(jvC+i zouk4oz*u9Q((Iyric9uLhPZsmgZ8ANMrS_2p5cn+n!M}dU&=mMrdq8|OlgOvF-oFN zh5A!%9Pk(EcxS4q(c~Z~u-BL7!+gIN2&&-GnGy1YRpY|{e@?X?J9}9;KY_$PxYO}H o;5QJT#=q||{Y*ZuNn-Gk-)jtGb|Y`+PV+v2`vmS2xaA4_1I+dVl>h($ literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go new file mode 100644 index 00000000..c67df2c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go @@ -0,0 +1,651 @@ +// Copyright 2014 go-dockerclient authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides a fake implementation of the Docker API, useful for
+// testing purposes.
+package testing
+
+import (
+	"archive/tar"
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/fsouza/go-dockerclient"
+	"github.com/fsouza/go-dockerclient/utils"
+	"github.com/gorilla/mux"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// DockerServer represents a programmable, concurrent (not much) HTTP server
+// implementing a fake version of the Docker remote API.
+//
+// It can be used in standalone mode, listening for connections, or as an
+// arbitrary HTTP handler.
+//
+// For more details on the remote API, check http://goo.gl/yMI1S.
+type DockerServer struct {
+	containers     []*docker.Container
+	cMut           sync.RWMutex
+	images         []docker.Image
+	iMut           sync.RWMutex
+	imgIDs         map[string]string
+	listener       net.Listener
+	mux            *mux.Router
+	hook           func(*http.Request)
+	failures       map[string]string
+	customHandlers map[string]http.Handler
+	handlerMutex   sync.RWMutex
+	cChan          chan<- *docker.Container
+}
+
+// NewServer returns a new instance of the fake server, in standalone mode. Use
+// the method URL to get the URL of the server.
+//
+// It receives the bind address (use 127.0.0.1:0 for getting an available port
+// on the host), a channel of containers, and a hook function that will be
+// called on every request.
+//
+// The fake server will send containers on the channel whenever a container
+// changes its state, via the HTTP API (i.e.: create, start and stop). This
+// channel may be nil, which means that the server won't notify on state
+// changes.
+func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) {
+	listener, err := net.Listen("tcp", bind)
+	if err != nil {
+		return nil, err
+	}
+	server := DockerServer{
+		listener:       listener,
+		imgIDs:         make(map[string]string),
+		hook:           hook,
+		failures:       make(map[string]string),
+		customHandlers: make(map[string]http.Handler),
+		cChan:          containerChan,
+	}
+	server.buildMuxer()
+	go http.Serve(listener, &server)
+	return &server, nil
+}
+
+func (s *DockerServer) notify(container *docker.Container) {
+	if s.cChan != nil {
+		s.cChan <- container
+	}
+}
+
+func (s *DockerServer) buildMuxer() {
+	s.mux = mux.NewRouter()
+	s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer))
+	s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers))
+	s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer))
+	s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer))
+	s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer))
+	s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
+	s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer))
+	s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer))
+	s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer))
+	s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer))
+	s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
+	s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage))
+	s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage))
+	s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages))
+	s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage))
+	s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage))
+	s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage))
+	s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents)
+	s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker))
+}
+
+// PrepareFailure adds a new expected failure based on a URL regexp. It
+// receives an id for the failure.
+func (s *DockerServer) PrepareFailure(id string, urlRegexp string) {
+	s.failures[id] = urlRegexp
+}
+
+// ResetFailure removes an expected failure identified by the given id.
+func (s *DockerServer) ResetFailure(id string) {
+	delete(s.failures, id)
+}
+
+// CustomHandler registers a custom handler for a specific path.
+//
+// For example:
+//
+//	server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		http.Error(w, "Something wrong is not right", http.StatusInternalServerError)
+//	}))
+func (s *DockerServer) CustomHandler(path string, handler http.Handler) {
+	s.handlerMutex.Lock()
+	s.customHandlers[path] = handler
+	s.handlerMutex.Unlock()
+}
+
+// MutateContainer changes the state of a container, returning an error if the
+// given id does not match any container "running" in the server.
+func (s *DockerServer) MutateContainer(id string, state docker.State) error {
+	for _, container := range s.containers {
+		if container.ID == id {
+			container.State = state
+			return nil
+		}
+	}
+	return errors.New("container not found")
+}
+
+// Stop stops the server.
+func (s *DockerServer) Stop() {
+	if s.listener != nil {
+		s.listener.Close()
+	}
+}
+
+// URL returns the HTTP URL of the server.
+func (s *DockerServer) URL() string {
+	if s.listener == nil {
+		return ""
+	}
+	return "http://" + s.listener.Addr().String() + "/"
+}
+
+// ServeHTTP handles HTTP requests sent to the server.
+func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	s.handlerMutex.RLock()
+	defer s.handlerMutex.RUnlock()
+	if handler, ok := s.customHandlers[r.URL.Path]; ok {
+		handler.ServeHTTP(w, r)
+		return
+	}
+	s.mux.ServeHTTP(w, r)
+	if s.hook != nil {
+		s.hook(r)
+	}
+}
+
+// DefaultHandler returns the default http.Handler mux; it allows custom
+// handlers to fall back to the default behavior if wanted.
+func (s *DockerServer) DefaultHandler() http.Handler { + return s.mux +} + +func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + for errorID, urlRegexp := range s.failures { + matched, err := regexp.MatchString(urlRegexp, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !matched { + continue + } + http.Error(w, errorID, http.StatusBadRequest) + return + } + f(w, r) + } +} + +func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) { + all := r.URL.Query().Get("all") + s.cMut.RLock() + result := make([]docker.APIContainers, len(s.containers)) + for i, container := range s.containers { + if all == "1" || container.State.Running { + result[i] = docker.APIContainers{ + ID: container.ID, + Image: container.Image, + Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")), + Created: container.Created.Unix(), + Status: container.State.String(), + Ports: container.NetworkSettings.PortMappingAPI(), + } + } + } + s.cMut.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) { + s.cMut.RLock() + result := make([]docker.APIImages, len(s.images)) + for i, image := range s.images { + result[i] = docker.APIImages{ + ID: image.ID, + Created: image.Created.Unix(), + } + } + s.cMut.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) findImage(id string) (string, error) { + s.iMut.RLock() + defer s.iMut.RUnlock() + image, ok := s.imgIDs[id] + if ok { + return image, nil + } + image, _, err := s.findImageByID(id) + return image, err +} + +func (s *DockerServer) findImageByID(id string) (string, int, error) { + s.iMut.RLock() + defer s.iMut.RUnlock() + for i, image := range s.images { + if image.ID == id { + return image.ID, i, nil + } + } + return "", -1, errors.New("No such image") +} + +func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) { + var config docker.Config + defer r.Body.Close() + err := json.NewDecoder(r.Body).Decode(&config) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + image, err := s.findImage(config.Image) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.WriteHeader(http.StatusCreated) + ports := map[docker.Port][]docker.PortBinding{} + for port := range config.ExposedPorts { + ports[port] = []docker.PortBinding{{ + HostIp: "0.0.0.0", + HostPort: strconv.Itoa(mathrand.Int() % 65536), + }} + } + + //the container may not have cmd when using a Dockerfile + var path string + var args []string + if len(config.Cmd) == 1 { + path = config.Cmd[0] + } else if len(config.Cmd) > 1 { + path = config.Cmd[0] + args = config.Cmd[1:] + } + + container := docker.Container{ + ID: s.generateID(), + Created: time.Now(), + Path: path, + Args: args, + Config: &config, + State: docker.State{ + Running: false, + Pid: mathrand.Int() % 50000, + ExitCode: 0, + StartedAt: time.Now(), + }, + Image: image, + NetworkSettings: &docker.NetworkSettings{ + IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2), + IPPrefixLen: 24, + Gateway: "172.16.42.1", + Bridge: "docker0", + Ports: ports, + }, + } + s.cMut.Lock() + s.containers = 
append(s.containers, &container) + s.cMut.Unlock() + s.notify(&container) + var c = struct{ ID string }{ID: container.ID} + json.NewEncoder(w).Encode(c) +} + +func (s *DockerServer) generateID() string { + var buf [16]byte + rand.Read(buf[:]) + return fmt.Sprintf("%x", buf) +} + +func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(container) +} + +func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if container.State.Running { + http.Error(w, "Container already running", http.StatusBadRequest) + return + } + container.State.Running = true + s.notify(container) +} + +func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Running { + http.Error(w, "Container not running", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Running = false + s.notify(container) +} + +func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if container.State.Paused { + http.Error(w, "Container already paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = true +} + +func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + s.cMut.Lock() + defer s.cMut.Unlock() + if !container.State.Paused { + http.Error(w, "Container not paused", http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) + container.State.Paused = false +} + +func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + outStream := utils.NewStdWriter(w, utils.Stdout) + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + if container.State.Running { + fmt.Fprintf(outStream, "Container %q is running\n", container.ID) + } else { + fmt.Fprintf(outStream, "Container %q is not running\n", container.ID) + } + fmt.Fprintln(outStream, "What happened?") + fmt.Fprintln(outStream, "Something happened") +} + +func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + for { + time.Sleep(1e6) + s.cMut.RLock() + if !container.State.Running { + s.cMut.RUnlock() + break + } + s.cMut.RUnlock() + } + result := map[string]int{"StatusCode": 
container.State.ExitCode} + json.NewEncoder(w).Encode(result) +} + +func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + _, index, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + if s.containers[index].State.Running { + msg := "Error: API error (406): Impossible to remove a running container, please stop it first" + http.Error(w, msg, http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + s.cMut.Lock() + defer s.cMut.Unlock() + s.containers[index] = s.containers[len(s.containers)-1] + s.containers = s.containers[:len(s.containers)-1] +} + +func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) { + id := r.URL.Query().Get("container") + container, _, err := s.findContainer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + var config *docker.Config + runConfig := r.URL.Query().Get("run") + if runConfig != "" { + config = new(docker.Config) + err = json.Unmarshal([]byte(runConfig), config) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } + w.WriteHeader(http.StatusOK) + image := docker.Image{ + ID: "img-" + container.ID, + Parent: container.Image, + Container: container.ID, + Comment: r.URL.Query().Get("m"), + Author: r.URL.Query().Get("author"), + Config: config, + } + repository := r.URL.Query().Get("repo") + s.iMut.Lock() + s.images = append(s.images, image) + if repository != "" { + s.imgIDs[repository] = image.ID + } + s.iMut.Unlock() + fmt.Fprintf(w, `{"ID":%q}`, image.ID) +} + +func (s *DockerServer) findContainer(id string) (*docker.Container, int, error) { + s.cMut.RLock() + defer s.cMut.RUnlock() + for i, container := range s.containers { + if container.ID == id { + return container, i, nil + } + } + return nil, -1, errors.New("No such container") +} + +func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) { + if ct := r.Header.Get("Content-Type"); ct == "application/tar" { + gotDockerFile := false + tr := tar.NewReader(r.Body) + for { + header, err := tr.Next() + if err != nil { + break + } + if header.Name == "Dockerfile" { + gotDockerFile = true + } + } + if !gotDockerFile { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte("miss Dockerfile")) + return + } + } + //we did not use that Dockerfile to build image cause we are a fake Docker daemon + image := docker.Image{ + ID: s.generateID(), + } + query := r.URL.Query() + repository := image.ID + if t := query.Get("t"); t != "" { + repository = t + } + s.iMut.Lock() + s.images = append(s.images, image) + s.imgIDs[repository] = image.ID + s.iMut.Unlock() + w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID))) +} + +func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) { + repository := r.URL.Query().Get("fromImage") + image := docker.Image{ + ID: s.generateID(), + } + s.iMut.Lock() + s.images = append(s.images, image) + if repository != "" { + s.imgIDs[repository] = image.ID + } + s.iMut.Unlock() +} + +func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) { + name := mux.Vars(r)["name"] + s.iMut.RLock() + if _, ok := s.imgIDs[name]; !ok { + s.iMut.RUnlock() + http.Error(w, "No such image", http.StatusNotFound) + return + } + s.iMut.RUnlock() + fmt.Fprintln(w, "Pushing...") + fmt.Fprintln(w, "Pushed") +} + +func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + 
s.iMut.RLock() + var tag string + if img, ok := s.imgIDs[id]; ok { + id, tag = img, id + } + s.iMut.RUnlock() + _, index, err := s.findImageByID(id) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.WriteHeader(http.StatusNoContent) + s.iMut.Lock() + defer s.iMut.Unlock() + s.images[index] = s.images[len(s.images)-1] + s.images = s.images[:len(s.images)-1] + if tag != "" { + delete(s.imgIDs, tag) + } +} + +func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) { + name := mux.Vars(r)["name"] + if id, ok := s.imgIDs[name]; ok { + s.iMut.Lock() + defer s.iMut.Unlock() + + for _, img := range s.images { + if img.ID == id { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(img) + return + } + } + } + http.Error(w, "not found", http.StatusNotFound) +} + +func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var events [][]byte + count := mathrand.Intn(20) + for i := 0; i < count; i++ { + data, err := json.Marshal(s.generateEvent()) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + events = append(events, data) + } + w.WriteHeader(http.StatusOK) + for _, d := range events { + fmt.Fprintln(w, d) + time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond) + } +} + +func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func (s *DockerServer) generateEvent() *docker.APIEvents { + var eventType string + switch mathrand.Intn(4) { + case 0: + eventType = "create" + case 1: + eventType = "start" + case 2: + eventType = "stop" + case 3: + eventType = "destroy" + } + return &docker.APIEvents{ + ID: s.generateID(), + Status: eventType, + From: "mybase:latest", + Time: time.Now().Unix(), + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go new file mode 100644 index 00000000..bfc84e52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go @@ -0,0 +1,963 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing + +import ( + "encoding/json" + "fmt" + "github.com/fsouza/go-dockerclient" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "os" + "reflect" + "strings" + "testing" + "time" +) + +func TestNewServer(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.listener.Close() + conn, err := net.Dial("tcp", server.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + conn.Close() +} + +func TestServerStop(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + server.Stop() + _, err = net.Dial("tcp", server.listener.Addr().String()) + if err == nil { + t.Error("Unexpected error when dialing to stopped server") + } +} + +func TestServerStopNoListener(t *testing.T) { + server := DockerServer{} + server.Stop() +} + +func TestServerURL(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.Stop() + url := server.URL() + if expected := "http://" + server.listener.Addr().String() + "/"; url != expected { + t.Errorf("DockerServer.URL(): Want %q. Got %q.", expected, url) + } +} + +func TestServerURLNoListener(t *testing.T) { + server := DockerServer{} + url := server.URL() + if url != "" { + t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url) + } +} + +func TestHandleWithHook(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, func(*http.Request) { called = true }) + defer server.Stop() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("ServeHTTP did not call the hook function.") + } +} + +func TestCustomHandler(t *testing.T) { + var called bool + server, _ := NewServer("127.0.0.1:0", nil, nil) + addContainers(server, 2) + server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + fmt.Fprint(w, "Hello world") + })) + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if !called { + t.Error("Did not call the custom handler") + } + if got := recorder.Body.String(); got != "Hello world" { + t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got) + } +} + +func TestListContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIContainers, 2) + for i, container := range server.containers { + expected[i] = docker.APIContainers{ + ID: container.ID, + Image: container.Image, + Command: strings.Join(container.Config.Cmd, " "), + Created: container.Created.Unix(), + Status: container.State.String(), + Ports: container.NetworkSettings.PortMappingAPI(), + } + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListContainers. Want %#v. 
Got %#v.", expected, got) + } +} + +func TestListRunningContainers(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=0", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + var got []docker.APIContainers + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if len(got) == 0 { + t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got)) + } +} + +func TestCreateContainer(t *testing.T) { + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + var returned docker.Container + err := json.NewDecoder(recorder.Body).Decode(&returned) + if err != nil { + t.Fatal(err) + } + stored := server.containers[0] + if returned.ID != stored.ID { + t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) + } + if stored.State.Running { + t.Errorf("CreateContainer should not set container to running state.") + } +} + +func TestCreateContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.imgIDs = map[string]string{"base": "a1234"} + server.cChan = ch + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusCreated { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) + } + if notified := <-ch; notified != server.containers[0] { + t.Errorf("CreateContainer: did not notify the proper container. Want %q. Got %q.", server.containers[0].ID, notified.ID) + } +} + +func TestCreateContainerInvalidBody(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---")) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CreateContainer: wrong status. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCreateContainerImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, +"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], +"Image":"base", "Volumes":{}, "VolumesFrom":""}` + request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestCommitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID) + if got := recorder.Body.String(); got != expected { + t.Errorf("CommitContainer: wrong response body. Want %q. Got %q.", expected, got) + } +} + +func TestCommitContainerComplete(t *testing.T) { + server := DockerServer{} + server.imgIDs = make(map[string]string) + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers" + queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}` + request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) + server.ServeHTTP(recorder, request) + image := server.images[0] + if image.Parent != server.containers[0].Image { + t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) + } + if image.Container != server.containers[0].ID { + t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) + } + message := "saving" + if image.Comment != message { + t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment) + } + author := "developers" + if image.Author != author { + t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author) + } + if id := server.imgIDs["tsuru/python"]; id != image.ID { + t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) + } + portSpecs := []string{"22"} + if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) { + t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs) + } + cmd := []string{"cat", "/world"} + if !reflect.DeepEqual(image.Config.Cmd, cmd) { + t.Errorf("CommitContainer: wrong cmd in config. Want %#v. Got %#v.", cmd, image.Config.Cmd) + } +} + +func TestCommitContainerInvalidRun(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("CommitContainer. Wrong status. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestCommitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/commit?container=abc123", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestInspectContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 2) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID) + request, _ := http.NewRequest("GET", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := server.containers[0] + var got docker.Container + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Config, expected.Config) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } + got.State.StartedAt = expected.State.StartedAt + got.State.FinishedAt = expected.State.FinishedAt + got.Config = expected.Config + got.Created = expected.Created + got.NetworkSettings = expected.NetworkSettings + if !reflect.DeepEqual(got, *expected) { + t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) + } +} + +func TestInspectContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/abc123/json", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("InspectContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStartContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if !server.containers[0].State.Running { + t.Error("StartContainer: did not set the container to running state") + } +} + +func TestStartContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StartContainer: did not notify the proper container. Want %q. 
Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStartContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/start" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStartContainerAlreadyRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestStopContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Running { + t.Error("StopContainer: did not stop the container") + } +} + +func TestStopContainerWithNotifyChannel(t *testing.T) { + ch := make(chan *docker.Container, 1) + server := DockerServer{} + server.cChan = ch + addContainers(&server, 1) + addContainers(&server, 1) + server.containers[1].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[1].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if notified := <-ch; notified != server.containers[1] { + t.Errorf("StopContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID) + } +} + +func TestStopContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/stop" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestStopContainerNotRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("StopContainer: wrong status code. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if !server.containers[0].State.Paused { + t.Error("PauseContainer: did not pause the container") + } +} + +func TestPauseContainerAlreadyPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestPauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/pause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestUnpauseContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Paused = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if server.containers[0].State.Paused { + t.Error("UnpauseContainer: did not unpause the container") + } +} + +func TestUnpauseContainerNotPaused(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } +} + +func TestUnpauseContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/unpause" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("UnpauseContainer: wrong status code. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestWaitContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + go func() { + server.cMut.Lock() + server.containers[0].State.Running = false + server.cMut.Unlock() + }() + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":0}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestWaitContainerStatus(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + server.containers[0].State.ExitCode = 63 + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := `{"StatusCode":63}` + "\n" + if body := recorder.Body.String(); body != expected { + t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestWaitContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/wait" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestAttachContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + lines := []string{ + fmt.Sprintf("\x01\x00\x00\x00\x03\x00\x00\x00Container %q is running", server.containers[0].ID), + "What happened?", + "Something happened", + } + expected := strings.Join(lines, "\n") + "\n" + if body := recorder.Body.String(); body == expected { + t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + +func TestAttachContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := "/containers/abc123/attach?logs=1" + request, _ := http.NewRequest("POST", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("AttachContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveContainer(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveContainer: wrong status. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.containers) > 0 { + t.Error("RemoveContainer: did not remove the container.") + } +} + +func TestRemoveContainerNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/abc123") + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func TestRemoveContainerRunning(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/containers/%s", server.containers[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusInternalServerError { + t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) + } + if len(server.containers) < 1 { + t.Error("RemoveContainer: should not remove the container.") + } +} + +func TestPullImage(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + if len(server.images) != 1 { + t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) + } + if _, ok := server.imgIDs["base"]; !ok { + t.Error("PullImage: Repository should not be empty.") + } +} + +func TestPushImage(t *testing.T) { + server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestPushImageNotFound(t *testing.T) { + server := DockerServer{} + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNotFound { + t.Errorf("PushImage: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) + } +} + +func addContainers(server *DockerServer, n int) { + server.cMut.Lock() + defer server.cMut.Unlock() + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + container := docker.Container{ + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + Path: "ls", + Args: []string{"-la", ".."}, + Config: &docker.Config{ + Hostname: fmt.Sprintf("docker-%d", i), + AttachStdout: true, + AttachStderr: true, + Env: []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)}, + Cmd: []string{"ls", "-la", ".."}, + Image: "base", + }, + State: docker.State{ + Running: false, + Pid: 400 + i, + ExitCode: 0, + StartedAt: date, + }, + Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + NetworkSettings: &docker.NetworkSettings{ + IPAddress: fmt.Sprintf("10.10.10.%d", i+2), + IPPrefixLen: 24, + Gateway: "10.10.10.1", + Bridge: "docker0", + PortMapping: map[string]docker.PortMapping{ + "Tcp": {"8888": fmt.Sprintf("%d", 49600+i)}, + }, + }, + ResolvConfPath: "/etc/resolv.conf", + } + server.containers = append(server.containers, &container) + } +} + +func addImages(server *DockerServer, n int, repo bool) { + server.iMut.Lock() + defer server.iMut.Unlock() + if server.imgIDs == nil { + server.imgIDs = make(map[string]string) + } + for i := 0; i < n; i++ { + date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) + image := docker.Image{ + ID: fmt.Sprintf("%x", rand.Int()%10000), + Created: date, + } + server.images = append(server.images, image) + if repo { + repo := "docker/python-" + image.ID + server.imgIDs[repo] = image.ID + } + } +} + +func TestListImages(t *testing.T) { + server := DockerServer{} + addImages(&server, 2, false) + server.buildMuxer() + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/images/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } + expected := make([]docker.APIImages, 2) + for i, image := range server.images { + expected[i] = docker.APIImages{ + ID: image.ID, + Created: image.Created.Unix(), + } + } + var got []docker.APIImages + err := json.NewDecoder(recorder.Body).Decode(&got) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("ListImages. Want %#v. Got %#v.", expected, got) + } +} + +func TestRemoveImage(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, false) + server.buildMuxer() + recorder := httptest.NewRecorder() + path := fmt.Sprintf("/images/%s", server.images[0].ID) + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } +} + +func TestRemoveImageByName(t *testing.T) { + server := DockerServer{} + addImages(&server, 1, true) + server.buildMuxer() + recorder := httptest.NewRecorder() + imgName := "docker/python-" + server.images[0].ID + path := "/images/" + imgName + request, _ := http.NewRequest("DELETE", path, nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusNoContent { + t.Errorf("RemoveImage: wrong status. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) + } + if len(server.images) > 0 { + t.Error("RemoveImage: did not remove the image.") + } + _, ok := server.imgIDs[imgName] + if ok { + t.Error("RemoveImage: did not remove image tag name.") + } +} + +func TestPrepareFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + if recorder.Body.String() != errorID+"\n" { + t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) + } +} + +func TestRemoveFailure(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + errorID := "my_error" + server.PrepareFailure(errorID, "containers/json") + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusBadRequest { + t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + } + server.ResetFailure(errorID) + recorder = httptest.NewRecorder() + request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) + server.ServeHTTP(recorder, request) + if recorder.Code != http.StatusOK { + t.Errorf("RemoveFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) + } +} + +func TestMutateContainer(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + server.containers = append(server.containers, &docker.Container{ID: "id123"}) + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(server.containers[0].State, state) { + t.Errorf("Wrong state after mutation.\nWant %#v.\nGot %#v.", + state, server.containers[0].State) + } +} + +func TestMutateContainerNotFound(t *testing.T) { + server := DockerServer{failures: make(map[string]string)} + server.buildMuxer() + state := docker.State{Running: false, ExitCode: 1} + err := server.MutateContainer("id123", state) + if err == nil { + t.Error("Unexpected error") + } + if err.Error() != "container not found" { + t.Errorf("wrong error message. Want %q. 
Got %q.", "container not found", err) + } +} + +func TestBuildImageWithContentTypeTar(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + tarFile, err := os.Open("data/dockerfile.tar") + if err != nil { + t.Fatal(err) + } + defer tarFile.Close() + request, _ := http.NewRequest("POST", "/build?t=teste", tarFile) + request.Header.Add("Content-Type", "application/tar") + server.buildImage(recorder, request) + if recorder.Body.String() == "miss Dockerfile" { + t.Errorf("BuildImage: miss Dockerfile") + return + } + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestBuildImageWithRemoteDockerfile(t *testing.T) { + server := DockerServer{imgIDs: make(map[string]string)} + imageName := "teste" + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil) + server.buildImage(recorder, request) + if _, ok := server.imgIDs[imageName]; ok == false { + t.Errorf("BuildImage: image %s not builded", imageName) + } +} + +func TestPing(t *testing.T) { + server := DockerServer{} + recorder := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/_ping", nil) + server.pingDocker(recorder, request) + if recorder.Body.String() != "" { + t.Errorf("Ping: Unexpected body: %s", recorder.Body.String()) + } + if recorder.Code != http.StatusOK { + t.Errorf("Ping: Expected code %d, got: %d", http.StatusOK, recorder.Code) + } +} + +func TestDefaultHandler(t *testing.T) { + server, err := NewServer("127.0.0.1:0", nil, nil) + if err != nil { + t.Fatal(err) + } + defer server.listener.Close() + if server.mux != server.DefaultHandler() { + t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler()) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/random.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/random.go new file mode 100644 index 00000000..e2e68365 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/random.go @@ -0,0 +1,20 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package utils + +import ( + "crypto/rand" + "encoding/hex" + "io" +) + +func RandomString() string { + id := make([]byte, 32) + _, err := io.ReadFull(rand.Reader, id) + if err != nil { + panic(err) // This shouldn't happen + } + return hex.EncodeToString(id) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy.go new file mode 100644 index 00000000..dec604ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy.go @@ -0,0 +1,168 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package utils + +import ( + "encoding/binary" + "errors" + "io" +) + +const ( + StdWriterPrefixLen = 8 + StdWriterFdIndex = 0 + StdWriterSizeIndex = 4 +) + +type StdType [StdWriterPrefixLen]byte + +var ( + Stdin StdType = StdType{0: 0} + Stdout StdType = StdType{0: 1} + Stderr StdType = StdType{0: 2} +) + +type StdWriter struct { + io.Writer + prefix StdType + sizeBuf []byte +} + +func (w *StdWriter) Write(buf []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instanciated") + } + binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) + buf = append(w.prefix[:], buf...) + + n, err = w.Writer.Write(buf) + return n - StdWriterPrefixLen, err +} + +// NewStdWriter instanciates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be utils.Stdin, utils.Stdout, utils.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + if len(t) != StdWriterPrefixLen { + return nil + } + + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var ErrInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + if er == io.EOF { + if nr < StdWriterPrefixLen && nr2 < StdWriterPrefixLen { + return written, nil + } + nr += nr2 + break + } else if er != nil { + return 0, er + } + nr += nr2 + } + + // Check the first byte to know where to write + switch buf[StdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) + return 0, ErrInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+StdWriterPrefixLen > bufLen { + Debugf("Extending buffer cap.") + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-len(buf)+1)...) 
+ bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + if er == io.EOF { + if nr == 0 { + return written, nil + } + nr += nr2 + break + } else if er != nil { + Debugf("Error reading frame: %s", er) + return 0, er + } + nr += nr2 + } + + // Write the retrieved frame (without header) + bound := frameSize + StdWriterPrefixLen + if bound > nr { + bound = nr + } + nw, ew = out.Write(buf[StdWriterPrefixLen:bound]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + Debugf("Error writing frame: %s", ew) + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + Debugf("Error Short Write: (%d on %d)", nw, frameSize) + return written, io.ErrShortWrite + } + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+StdWriterPrefixLen:]) + // Move the index + nr -= frameSize + StdWriterPrefixLen + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy_test.go new file mode 100644 index 00000000..81820ebb --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/stdcopy_test.go @@ -0,0 +1,217 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package utils + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + "testing/iotest" +) + +type errorWriter struct { +} + +func (errorWriter) Write([]byte) (int, error) { + return 0, errors.New("something went wrong") +} + +func TestStdCopy(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + input.Write([]byte("something happened!")) + input.Write([]byte{1, 0, 0, 0, 0, 0, 0, 12}) + input.Write([]byte("just kidding")) + input.Write([]byte{0, 0, 0, 0, 0, 0, 0, 6}) + input.Write([]byte("\nyeah!")) + n, err := StdCopy(&stdout, &stderr, &input) + if err != nil { + t.Fatal(err) + } + if expected := int64(19 + 12 + 6); n != expected { + t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n) + } + if got := stderr.String(); got != "something happened!" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got) + } + if got := stdout.String(); got != "just kidding\nyeah!" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "just kidding\nyeah!", got) + } +} + +func TestStdCopyStress(t *testing.T) { + var input, stdout, stderr bytes.Buffer + value := strings.Repeat("something ", 4096) + writer := NewStdWriter(&input, Stdout) + writer.Write([]byte(value)) + n, err := StdCopy(&stdout, &stderr, &input) + if err != nil { + t.Fatal(err) + } + if n != 40960 { + t.Errorf("Wrong number of bytes. Want 40960. Got %d.", n) + } + if got := stderr.String(); got != "" { + t.Errorf("StdCopy: wrong stderr. Want empty string. Got %q", got) + } + if got := stdout.String(); got != value { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q", value, got) + } +} + +func TestStdCopyInvalidStdHeader(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{3, 0, 0, 0, 0, 0, 0, 19}) + n, err := StdCopy(&stdout, &stderr, &input) + if n != 0 { + t.Errorf("StdCopy: wrong number of bytes. Want 0. Got %d", n) + } + if err != ErrInvalidStdHeader { + t.Errorf("StdCopy: wrong error. 
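StdWriter and StdCopy above implement a small framing protocol: every frame begins with an 8-byte header whose first byte names the stream (0 or 1 for stdout, 2 for stderr) and whose last four bytes carry the payload length as a big-endian uint32, which is how a container's stdout and stderr can be multiplexed over a single connection. A minimal round-trip sketch using only the vendored utils package (the payload strings are made up):

package main

import (
	"bytes"
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	var muxed bytes.Buffer

	// Wrap the same buffer once per stream; each Write prepends the
	// 8-byte header (stream id plus big-endian payload length).
	outWriter := utils.NewStdWriter(&muxed, utils.Stdout)
	errWriter := utils.NewStdWriter(&muxed, utils.Stderr)
	outWriter.Write([]byte("hello from stdout\n"))
	errWriter.Write([]byte("hello from stderr\n"))

	// Demultiplex the combined stream back into two writers.
	var stdout, stderr bytes.Buffer
	written, err := utils.StdCopy(&stdout, &stderr, &muxed)
	if err != nil {
		panic(err)
	}
	fmt.Println(written)       // total payload bytes copied (headers excluded)
	fmt.Print(stdout.String()) // hello from stdout
	fmt.Print(stderr.String()) // hello from stderr
}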
Want ErrInvalidStdHeader. Got %#v", err) + } +} + +func TestStdCopyBigFrame(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 18}) + input.Write([]byte("something happened!")) + n, err := StdCopy(&stdout, &stderr, &input) + if err != nil { + t.Fatal(err) + } + if expected := int64(18); n != expected { + t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n) + } + if got := stderr.String(); got != "something happened" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "something happened", got) + } + if got := stdout.String(); got != "" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "", got) + } +} + +func TestStdCopySmallFrame(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 20}) + input.Write([]byte("something happened!")) + n, err := StdCopy(&stdout, &stderr, &input) + if err != io.ErrShortWrite { + t.Errorf("StdCopy: wrong error. Want ShortWrite. Got %#v", err) + } + if expected := int64(19); n != expected { + t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n) + } + if got := stderr.String(); got != "something happened!" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "something happened", got) + } + if got := stdout.String(); got != "" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "", got) + } +} + +func TestStdCopyEmpty(t *testing.T) { + var input, stdout, stderr bytes.Buffer + n, err := StdCopy(&stdout, &stderr, &input) + if err != nil { + t.Fatal(err) + } + if n != 0 { + t.Errorf("StdCopy: wrong number of bytes. Want 0. Got %d.", n) + } +} + +func TestStdCopyCorruptedHeader(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0}) + n, err := StdCopy(&stdout, &stderr, &input) + if err != nil { + t.Fatal(err) + } + if n != 0 { + t.Errorf("StdCopy: wrong number of bytes. Want 0. Got %d.", n) + } +} + +func TestStdCopyTruncateWriter(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + input.Write([]byte("something happened!")) + n, err := StdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input) + if err != nil { + t.Fatal(err) + } + if expected := int64(19); n != expected { + t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n) + } + if got := stderr.String(); got != "somethi" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "somethi", got) + } + if got := stdout.String(); got != "" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "", got) + } +} + +func TestStdCopyHeaderOnly(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + n, err := StdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input) + if err != io.ErrShortWrite { + t.Errorf("StdCopy: wrong error. Want ShortWrite. Got %#v", err) + } + if n != 0 { + t.Errorf("Wrong number of bytes. Want 0. Got %d.", n) + } + if got := stderr.String(); got != "" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "", got) + } + if got := stdout.String(); got != "" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "", got) + } +} + +func TestStdCopyDataErrReader(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + input.Write([]byte("something happened!")) + n, err := StdCopy(&stdout, &stderr, iotest.DataErrReader(&input)) + if err != nil { + t.Fatal(err) + } + if expected := int64(19); n != expected { + t.Errorf("Wrong number of bytes. Want %d. 
Got %d.", expected, n) + } + if got := stderr.String(); got != "something happened!" { + t.Errorf("StdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got) + } + if got := stdout.String(); got != "" { + t.Errorf("StdCopy: wrong stdout. Want %q. Got %q.", "", got) + } +} + +func TestStdCopyTimeoutReader(t *testing.T) { + var input, stdout, stderr bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + input.Write([]byte("something happened!")) + _, err := StdCopy(&stdout, &stderr, iotest.TimeoutReader(&input)) + if err != iotest.ErrTimeout { + t.Errorf("StdCopy: wrong error. Want ErrTimeout. Got %#v.", err) + } +} + +func TestStdCopyWriteError(t *testing.T) { + var input bytes.Buffer + input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19}) + input.Write([]byte("something happened!")) + var stdout, stderr errorWriter + n, err := StdCopy(stdout, stderr, &input) + if err.Error() != "something went wrong" { + t.Errorf("StdCopy: wrong error. Want %q. Got %q", "something went wrong", err) + } + if n != 0 { + t.Errorf("StdCopy: wrong number of bytes. Want 0. Got %d.", n) + } +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_darwin.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_darwin.go new file mode 100644 index 00000000..8e599699 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_darwin.go @@ -0,0 +1,17 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package utils + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is not available on darwin") +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_linux.go new file mode 100644 index 00000000..9ee7e3d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/uname_linux.go @@ -0,0 +1,20 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package utils + +import ( + "syscall" +) + +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils.go new file mode 100644 index 00000000..84b2815a --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils.go @@ -0,0 +1,1114 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
+ +package utils + +import ( + "bytes" + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "index/suffixarray" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var ( + IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary + INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary + INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch) +) + +// A common interface to access the Fatal method of +// both testing.B and testing.T. +type Fataler interface { + Fatal(args ...interface{}) +} + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error) + go func() { + ch <- f() + }() + return ch +} + +// Request a given URL and return an io.Reader +func Download(url string) (*http.Response, error) { + var resp *http.Response + var err error + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, errors.New("Got HTTP status code >= 400: " + resp.Status) + } + return resp, nil +} + +func logf(level string, format string, a ...interface{}) { + // Retrieve the stack infos + _, file, line, ok := runtime.Caller(2) + if !ok { + file = "" + line = -1 + } else { + file = file[strings.LastIndex(file, "/")+1:] + } + + fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...) +} + +// Debug function, if the debug flag is set, then display. Do nothing otherwise +// If Docker is in damon mode, also send the debug info on the socket +func Debugf(format string, a ...interface{}) { + if os.Getenv("DEBUG") != "" { + logf("debug", format, a...) + } +} + +func Errorf(format string, a ...interface{}) { + logf("error", format, a...) +} + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.) +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%f years", d.Hours()/24/365) +} + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. 
"44kB", "17MB") +func HumanSize(size int64) string { + i := 0 + var sizef float64 + sizef = float64(size) + units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + for sizef >= 1000.0 { + sizef = sizef / 1000.0 + i++ + } + return fmt.Sprintf("%.4g %s", sizef, units[i]) +} + +// Parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes or gibibytes, and returns the +// number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (bytes int64, err error) { + re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") + if error != nil { + return -1, error + } + + matches := re.FindStringSubmatch(size) + + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", size) + } + + memLimit, error := strconv.ParseInt(matches[1], 10, 0) + if error != nil { + return -1, error + } + + unit := strings.ToLower(matches[2]) + + if unit == "k" { + memLimit *= 1024 + } else if unit == "m" { + memLimit *= 1024 * 1024 + } else if unit == "g" { + memLimit *= 1024 * 1024 * 1024 + } + + return memLimit, nil +} + +func Trunc(s string, maxlen int) string { + if len(s) <= maxlen { + return s + } + return s[:maxlen] +} + +// Figure out the absolute path of our own binary (if it's still around). +func SelfPath() string { + path, err := exec.LookPath(os.Args[0]) + if err != nil { + if os.IsNotExist(err) { + return "" + } + if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { + return "" + } + panic(err) + } + path, err = filepath.Abs(path) + if err != nil { + if os.IsNotExist(err) { + return "" + } + panic(err) + } + return path +} + +func dockerInitSha1(target string) string { + f, err := os.Open(target) + if err != nil { + return "" + } + defer f.Close() + h := sha1.New() + _, err = io.Copy(h, f) + if err != nil { + return "" + } + return hex.EncodeToString(h.Sum(nil)) +} + +func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) + if target == "" { + return false + } + if IAMSTATIC { + if selfPath == "" { + return false + } + if target == selfPath { + return true + } + targetFileInfo, err := os.Lstat(target) + if err != nil { + return false + } + selfPathFileInfo, err := os.Lstat(selfPath) + if err != nil { + return false + } + return os.SameFile(targetFileInfo, selfPathFileInfo) + } + return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1 +} + +// Figure out the path of our dockerinit (which may be SelfPath()) +func DockerInitPath(localCopy string) string { + selfPath := SelfPath() + if isValidDockerInitPath(selfPath, selfPath) { + // if we're valid, don't bother checking anything else + return selfPath + } + var possibleInits = []string{ + localCopy, + INITPATH, + filepath.Join(filepath.Dir(selfPath), "dockerinit"), + + // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." + // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec + "/usr/libexec/docker/dockerinit", + "/usr/local/libexec/docker/dockerinit", + + // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." 
+ // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA + "/usr/lib/docker/dockerinit", + "/usr/local/lib/docker/dockerinit", + } + for _, dockerInit := range possibleInits { + if dockerInit == "" { + continue + } + path, err := exec.LookPath(dockerInit) + if err == nil { + path, err = filepath.Abs(path) + if err != nil { + // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? + panic(err) + } + if isValidDockerInitPath(path, selfPath) { + return path + } + } + } + return "" +} + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond +} + +func NewBufReader(r io.Reader) *bufReader { + reader := &bufReader{ + buf: &bytes.Buffer{}, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + buf := make([]byte, 1024) + for { + n, err := r.reader.Read(buf) + r.Lock() + if err != nil { + r.err = err + } else { + r.buf.Write(buf[0:n]) + } + r.wait.Signal() + r.Unlock() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} + +type WriteBroadcaster struct { + sync.Mutex + buf *bytes.Buffer + writers map[StreamWriter]bool +} + +type StreamWriter struct { + wc io.WriteCloser + stream string +} + +func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) { + w.Lock() + sw := StreamWriter{wc: writer, stream: stream} + w.writers[sw] = true + w.Unlock() +} + +type JSONLog struct { + Log string `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` +} + +func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { + w.Lock() + defer w.Unlock() + w.buf.Write(p) + for sw := range w.writers { + lp := p + if sw.stream != "" { + lp = nil + for { + line, err := w.buf.ReadString('\n') + if err != nil { + w.buf.Write([]byte(line)) + break + } + b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()}) + if err != nil { + // On error, evict the writer + delete(w.writers, sw) + continue + } + lp = append(lp, b...) 
+ lp = append(lp, '\n') + } + } + if n, err := sw.wc.Write(lp); err != nil || n != len(lp) { + // On error, evict the writer + delete(w.writers, sw) + } + } + return len(p), nil +} + +func (w *WriteBroadcaster) CloseWriters() error { + w.Lock() + defer w.Unlock() + for sw := range w.writers { + sw.wc.Close() + } + w.writers = make(map[StreamWriter]bool) + return nil +} + +func NewWriteBroadcaster() *WriteBroadcaster { + return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)} +} + +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + index *suffixarray.Index + ids map[string]bool + bytes []byte +} + +func NewTruncIndex() *TruncIndex { + return &TruncIndex{ + index: suffixarray.New([]byte{' '}), + ids: make(map[string]bool), + bytes: []byte{' '}, + } +} + +func (idx *TruncIndex) Add(id string) error { + if strings.Contains(id, " ") { + return fmt.Errorf("Illegal character: ' '") + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("Id already exists: %s", id) + } + idx.ids[id] = true + idx.bytes = append(idx.bytes, []byte(id+" ")...) + idx.index = suffixarray.New(idx.bytes) + return nil +} + +func (idx *TruncIndex) Delete(id string) error { + if _, exists := idx.ids[id]; !exists { + return fmt.Errorf("No such id: %s", id) + } + before, after, err := idx.lookup(id) + if err != nil { + return err + } + delete(idx.ids, id) + idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...) + idx.index = suffixarray.New(idx.bytes) + return nil +} + +func (idx *TruncIndex) lookup(s string) (int, int, error) { + offsets := idx.index.Lookup([]byte(" "+s), -1) + //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes()) + if offsets == nil || len(offsets) == 0 || len(offsets) > 1 { + return -1, -1, fmt.Errorf("No such id: %s", s) + } + offsetBefore := offsets[0] + 1 + offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ") + return offsetBefore, offsetAfter, nil +} + +func (idx *TruncIndex) Get(s string) (string, error) { + before, after, err := idx.lookup(s) + //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after) + if err != nil { + return "", err + } + return string(idx.bytes[before:after]), err +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a langer prefix, or the full-length Id. 
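TruncIndex above backs lookups of IDs by prefix: every ID is appended to a space-separated byte string indexed by a suffix array, so Get succeeds only when the given prefix matches exactly one stored ID and reports "No such id" otherwise. A short sketch of that behaviour, reusing the IDs from the package's own tests further down:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	index := utils.NewTruncIndex()
	// Two IDs sharing a 6-character prefix, as in TestTruncIndex.
	index.Add("99b36c2c326ccc11e726eee6ee78a0baf166ef96")
	index.Add("99b36cblabla")

	if id, err := index.Get("99b36c2"); err == nil {
		fmt.Println(id) // the unique 7-character prefix resolves to the full ID
	}
	if _, err := index.Get("99b36c"); err != nil {
		fmt.Println(err) // the 6-character prefix is ambiguous: "No such id: 99b36c"
	}
}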
+func TruncateID(id string) string { + shortLen := 12 + if len(id) < shortLen { + shortLen = len(id) + } + return id[:shortLen] +} + +// Code c/c from io.Copy() modified to handle escape sequence +func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + // char 16 is C-p + if nr == 1 && buf[0] == 16 { + nr, er = src.Read(buf) + // char 17 is C-q + if nr == 1 && buf[0] == 17 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +func (k *KernelVersionInfo) String() string { + flavor := "" + if len(k.Flavor) > 0 { + flavor = fmt.Sprintf("-%s", k.Flavor) + } + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor) +} + +// Compare two KernelVersionInfo struct. +// Returns -1 if a < b, = if a == b, 1 it a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + flavor string + kernel, major, minor int + err error + ) + + tmp := strings.SplitN(release, "-", 2) + tmp2 := strings.Split(tmp[0], ".") + + if len(tmp2) > 0 { + kernel, err = strconv.Atoi(tmp2[0]) + if err != nil { + return nil, err + } + } + + if len(tmp2) > 1 { + major, err = strconv.Atoi(tmp2[1]) + if err != nil { + return nil, err + } + } + + if len(tmp2) > 2 { + // Removes "+" because git kernels might set it + minorUnparsed := strings.Trim(tmp2[2], "+") + minor, err = strconv.Atoi(minorUnparsed) + if err != nil { + return nil, err + } + } + + if len(tmp) == 2 { + flavor = tmp[1] + } else { + flavor = "" + } + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} + +// FIXME: this is deprecated by CopyWithTar in archive.go +func CopyDirectory(source, dest string) error { + if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { + return fmt.Errorf("Error copy: %s (%s)", err, output) + } + return nil +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type WriteFlusher struct { + sync.Mutex + w io.Writer + flusher http.Flusher +} + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + wf.Lock() + 
defer wf.Unlock() + n, err = wf.w.Write(b) + wf.flusher.Flush() + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + wf.Lock() + defer wf.Unlock() + wf.flusher.Flush() +} + +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var flusher http.Flusher + if f, ok := w.(http.Flusher); ok { + flusher = f + } else { + flusher = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: flusher} +} + +func IsURL(str string) bool { + return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") +} + +func IsGIT(str string) bool { + return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") +} + +// GetResolvConf opens and read the content of /etc/resolv.conf. +// It returns it as byte slice. +func GetResolvConf() ([]byte, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + Errorf("Error openning resolv.conf: %s", err) + return nil, err + } + return resolv, nil +} + +// CheckLocalDns looks into the /etc/resolv.conf, +// it returns true if there is a local nameserver or if there is no nameserver. +func CheckLocalDns(resolvConf []byte) bool { + var parsedResolvConf = StripComments(resolvConf, []byte("#")) + if !bytes.Contains(parsedResolvConf, []byte("nameserver")) { + return true + } + for _, ip := range [][]byte{ + []byte("127.0.0.1"), + []byte("127.0.1.1"), + } { + if bytes.Contains(parsedResolvConf, ip) { + return true + } + } + return false +} + +// StripComments parses input into lines and strips away comments. +func StripComments(input []byte, commentMarker []byte) []byte { + lines := bytes.Split(input, []byte("\n")) + var output []byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine...) + } else { + output = append(output, currentLine[:commentIndex]...) + } + output = append(output, []byte("\n")...) 
+ } + return output +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + var parsedResolvConf = StripComments(resolvConf, []byte("#")) + nameservers := []string{} + re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) + for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) { + var ns = re.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])+"/32") + } + } + + return nameservers +} + +// FIXME: Change this not to receive default value as parameter +func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (string, error) { + var ( + proto string + host string + port int + ) + addr = strings.TrimSpace(addr) + switch { + case strings.HasPrefix(addr, "unix://"): + proto = "unix" + addr = strings.TrimPrefix(addr, "unix://") + if addr == "" { + addr = defaultUnix + } + case strings.HasPrefix(addr, "tcp://"): + proto = "tcp" + addr = strings.TrimPrefix(addr, "tcp://") + case addr == "": + proto = "unix" + addr = defaultUnix + default: + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid bind address protocol: %s", addr) + } + proto = "tcp" + } + + if proto != "unix" && strings.Contains(addr, ":") { + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + if hostParts[0] != "" { + host = hostParts[0] + } else { + host = defaultHost + } + + if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { + port = p + } else { + port = defaultPort + } + + } else { + host = addr + port = defaultPort + } + if proto == "unix" { + return fmt.Sprintf("%s://%s", proto, host), nil + } + return fmt.Sprintf("%s://%s:%d", proto, host, port), nil +} + +func GetReleaseVersion() string { + resp, err := http.Get("http://get.docker.io/latest") + if err != nil { + return "" + } + defer resp.Body.Close() + if resp.ContentLength > 24 || resp.StatusCode != 200 { + return "" + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "" + } + return strings.TrimSpace(string(body)) +} + +// Get a repos name and returns the right reposName + tag +// The tag can be confusing because of a port in a repository name. +// Ex: localhost.localdomain:5000/samalba/hipache:latest +func ParseRepositoryTag(repos string) (string, string) { + n := strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +type User struct { + Uid string // user id + Gid string // primary group id + Username string + Name string + HomeDir string +} + +// UserLookup check if the given username or uid is present in /etc/passwd +// and returns the user struct. +// If the username is not found, an error is returned. 
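ParseRepositoryTag above only treats the text after the last colon as a tag when it contains no slash, which is how a registry port such as localhost:5000 stays inside the repository name instead of being mistaken for a tag. A quick sketch over inputs taken from its doc comment and tests:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	for _, name := range []string{
		"root",
		"user/repo:tag",
		"localhost.localdomain:5000/samalba/hipache:latest",
		"url:5000/repo", // ":5000/..." is a port, not a tag
	} {
		repo, tag := utils.ParseRepositoryTag(name)
		fmt.Printf("%-50s repo=%q tag=%q\n", name, repo, tag)
	}
}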
+func UserLookup(uid string) (*User, error) { + file, err := ioutil.ReadFile("/etc/passwd") + if err != nil { + return nil, err + } + for _, line := range strings.Split(string(file), "\n") { + data := strings.Split(line, ":") + if len(data) > 5 && (data[0] == uid || data[2] == uid) { + return &User{ + Uid: data[2], + Gid: data[3], + Username: data[0], + Name: data[4], + HomeDir: data[5], + }, nil + } + } + return nil, fmt.Errorf("User not found in /etc/passwd") +} + +type DependencyGraph struct { + nodes map[string]*DependencyNode +} + +type DependencyNode struct { + id string + deps map[*DependencyNode]bool +} + +func NewDependencyGraph() DependencyGraph { + return DependencyGraph{ + nodes: map[string]*DependencyNode{}, + } +} + +func (graph *DependencyGraph) addNode(node *DependencyNode) string { + if graph.nodes[node.id] == nil { + graph.nodes[node.id] = node + } + return node.id +} + +func (graph *DependencyGraph) NewNode(id string) string { + if graph.nodes[id] != nil { + return id + } + nd := &DependencyNode{ + id: id, + deps: map[*DependencyNode]bool{}, + } + graph.addNode(nd) + return id +} + +func (graph *DependencyGraph) AddDependency(node, to string) error { + if graph.nodes[node] == nil { + return fmt.Errorf("Node %s does not belong to this graph", node) + } + + if graph.nodes[to] == nil { + return fmt.Errorf("Node %s does not belong to this graph", to) + } + + if node == to { + return fmt.Errorf("Dependency loops are forbidden!") + } + + graph.nodes[node].addDependency(graph.nodes[to]) + return nil +} + +func (node *DependencyNode) addDependency(to *DependencyNode) bool { + node.deps[to] = true + return node.deps[to] +} + +func (node *DependencyNode) Degree() int { + return len(node.deps) +} + +// The magic happens here :: +func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) { + Debugf("Generating traversal map. Nodes: %d", len(graph.nodes)) + result := [][]string{} + processed := map[*DependencyNode]bool{} + // As long as we haven't processed all nodes... + for len(processed) < len(graph.nodes) { + // Use a temporary buffer for processed nodes, otherwise + // nodes that depend on each other could end up in the same round. + tmpProcessed := []*DependencyNode{} + for _, node := range graph.nodes { + // If the node has more dependencies than what we have cleared, + // it won't be valid for this round. + if node.Degree() > len(processed) { + continue + } + // If it's already processed, get to the next one + if processed[node] { + continue + } + // It's not been processed yet and has 0 deps. Add it! + // (this is a shortcut for what we're doing below) + if node.Degree() == 0 { + tmpProcessed = append(tmpProcessed, node) + continue + } + // If at least one dep hasn't been processed yet, we can't + // add it. + ok := true + for dep := range node.deps { + if !processed[dep] { + ok = false + break + } + } + // All deps have already been processed. Add it! + if ok { + tmpProcessed = append(tmpProcessed, node) + } + } + Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed)) + // If no progress has been made this round, + // that means we have circular dependencies. + if len(tmpProcessed) == 0 { + return nil, fmt.Errorf("Could not find a solution to this dependency graph") + } + round := []string{} + for _, nd := range tmpProcessed { + round = append(round, nd.id) + processed[nd] = true + } + result = append(result, round) + } + return result, nil +} + +// An StatusError reports an unsuccessful exit by a command. 
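GenerateTraversalMap above resolves the dependency graph in rounds: each pass collects every unprocessed node whose dependencies have all been processed, so the result is a list of batches that could each run in parallel, and a cycle is reported as an error once a round makes no progress. A sketch mirroring the four-node graph used in TestDependencyGraph:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	graph := utils.NewDependencyGraph()
	a := graph.NewNode("a")
	b := graph.NewNode("b")
	c := graph.NewNode("c")
	d := graph.NewNode("d")
	// b and c depend on a; d depends on both b and c.
	graph.AddDependency(b, a)
	graph.AddDependency(c, a)
	graph.AddDependency(d, b)
	graph.AddDependency(d, c)

	rounds, err := graph.GenerateTraversalMap()
	if err != nil {
		panic(err) // only returned for circular dependencies
	}
	fmt.Println(rounds) // [[a] [b c] [d]]  ("b" and "c" may appear in either order)
}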
+type StatusError struct { + Status string + StatusCode int +} + +func (e *StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// Take a list of strings and escape them so they will be handled right +// when passed as arguments to an program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} + +func IsClosedError(err error) bool { + /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. + * See: + * http://golang.org/src/pkg/net/net.go + * https://code.google.com/p/go/issues/detail?id=4337 + * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ + */ + return strings.HasSuffix(err.Error(), "use of closed network connection") +} + +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +var globalTestID string + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +func CopyFile(src, dst string) (int64, error) { + if src == dst { + return 0, nil + } + sf, err := os.Open(src) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(dst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_test.go new file mode 100644 index 00000000..7c5dadee --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_test.go @@ -0,0 +1,535 @@ +// Copyright 2014 Docker authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. 
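PartParser above splits a colon-separated value against a colon-separated template and keys the pieces by the template's field names, which is how port mappings like ip:public:private are broken apart. A minimal sketch matching TestParsePortMapping further down:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	parts, err := utils.PartParser("ip:public:private", "192.168.1.1:80:8080")
	if err != nil {
		panic(err) // returned when the value has a different number of fields than the template
	}
	fmt.Println(parts["ip"], parts["public"], parts["private"]) // 192.168.1.1 80 8080
}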
+ +package utils + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestWriteBroadcaster(t *testing.T) { + writer := NewWriteBroadcaster() + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.AddWriter(bufferA, "") + bufferB := &dummyWriter{} + writer.AddWriter(bufferB, "") + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.AddWriter(bufferC, "") + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + writer.CloseWriters() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. +func TestRaceWriteBroadcaster(t *testing.T) { + writer := NewWriteBroadcaster() + c := make(chan bool) + go func() { + writer.AddWriter(devNullCloser(0), "") + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. 
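TestWriteBroadcaster above pins down the broadcaster's semantics: every registered writer receives each subsequent Write, writers added later miss earlier data, and a writer that fails is evicted. A small sketch that adapts plain buffers with NopWriteCloser from utils.go (stream names are left empty, so no JSON log framing is applied):

package main

import (
	"bytes"
	"fmt"

	"github.com/fsouza/go-dockerclient/utils"
)

func main() {
	broadcaster := utils.NewWriteBroadcaster()

	var early, late bytes.Buffer
	// An empty stream name means raw pass-through to the writer.
	broadcaster.AddWriter(utils.NopWriteCloser(&early), "")
	broadcaster.Write([]byte("foo"))

	// A writer added later only sees writes made after registration.
	broadcaster.AddWriter(utils.NopWriteCloser(&late), "")
	broadcaster.Write([]byte("bar"))
	broadcaster.CloseWriters()

	fmt.Println(early.String()) // foobar
	fmt.Println(late.String())  // bar
}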
+func TestTruncIndex(t *testing.T) { + index := NewTruncIndex() + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error", input) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"}, + -1) +} + +func TestHumanSize(t *testing.T) { + + size := strings.Trim(HumanSize(1000), " \t") + expect := "1 kB" + if size != expect { + t.Errorf("1000 -> expected '%s', got '%s'", expect, size) + } + + size = strings.Trim(HumanSize(1024), " \t") + expect = "1.024 kB" + if size != expect { + t.Errorf("1024 -> expected '%s', got '%s'", expect, size) + } +} + +func TestRAMInBytes(t *testing.T) { + assertRAMInBytes(t, "32", false, 32) + assertRAMInBytes(t, "32b", false, 32) + assertRAMInBytes(t, "32B", false, 32) + assertRAMInBytes(t, "32k", false, 32*1024) + assertRAMInBytes(t, "32K", false, 32*1024) + assertRAMInBytes(t, "32kb", false, 32*1024) + assertRAMInBytes(t, "32Kb", false, 32*1024) + assertRAMInBytes(t, "32Mb", false, 32*1024*1024) + assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) + + assertRAMInBytes(t, "", true, -1) + assertRAMInBytes(t, "hello", true, -1) + assertRAMInBytes(t, "-32", true, -1) + assertRAMInBytes(t, " 32 ", true, -1) + assertRAMInBytes(t, "32 mb", true, -1) + assertRAMInBytes(t, "32m b", true, -1) + assertRAMInBytes(t, "32bm", true, -1) +} + +func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { + actualBytes, err := RAMInBytes(size) + if (err != nil) && !expectError { + t.Errorf("Unexpected error parsing '%s': %s", size, err) + } + if (err == nil) && expectError { + t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) + } + if actualBytes != expectedBytes { + t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) + } +} + +func TestParseHost(t *testing.T) { + var ( + defaultHttpHost = "127.0.0.1" + defaultHttpPort = 4243 + defaultUnix = "/var/run/docker.sock" + ) + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" { + t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { + t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { + t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) + } + if addr, err := 
ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1:4243"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } +} + +func TestGetResolvConf(t *testing.T) { + resolvConfUtils, err := GetResolvConf() + if err != nil { + t.Fatal(err) + } + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + t.Fatal(err) + } + if string(resolvConfUtils) != string(resolvConfSystem) { + t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") + } +} + +func TestCheckLocalDns(t *testing.T) { + for resolv, result := range map[string]bool{`# Dynamic +nameserver 10.0.2.3 +search dotcloud.net`: false, + `# Dynamic +#nameserver 127.0.0.1 +nameserver 10.0.2.3 +search dotcloud.net`: false, + `# Dynamic +nameserver 10.0.2.3 #not used 127.0.1.1 +search dotcloud.net`: false, + `# Dynamic +#nameserver 10.0.2.3 +#search dotcloud.net`: true, + `# Dynamic +nameserver 127.0.0.1 +search dotcloud.net`: true, + `# Dynamic +nameserver 127.0.1.1 +search dotcloud.net`: true, + `# Dynamic +`: true, + ``: true, + } { + if CheckLocalDns([]byte(resolv)) != result { + t.Fatalf("Wrong local dns detection: {%s} should be %v", resolv, result) + } + } +} + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result 
{ + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0) +} + +func TestDependencyGraphCircular(t *testing.T) { + g1 := NewDependencyGraph() + a := g1.NewNode("a") + b := g1.NewNode("b") + g1.AddDependency(a, b) + g1.AddDependency(b, a) + res, err := g1.GenerateTraversalMap() + if res != nil { + t.Fatalf("Expected nil result") + } + if err == nil { + t.Fatalf("Expected error (circular graph can not be resolved)") + } +} + +func TestDependencyGraph(t *testing.T) { + g1 := NewDependencyGraph() + a := g1.NewNode("a") + b := g1.NewNode("b") + c := g1.NewNode("c") + d := g1.NewNode("d") + g1.AddDependency(b, a) + g1.AddDependency(c, a) + g1.AddDependency(d, c) + g1.AddDependency(d, b) + res, err := g1.GenerateTraversalMap() + + if err != nil { + t.Fatalf("%s", err) + } + + if res == nil { + t.Fatalf("Unexpected nil result") + } + + if len(res) != 3 { + t.Fatalf("Expected map of length 3, found %d instead", len(res)) + } + + if len(res[0]) != 1 || res[0][0] != "a" { + t.Fatalf("Expected [a], found %v instead", res[0]) + } + + if len(res[1]) != 2 { + t.Fatalf("Expected 2 nodes for step 2, found %d", len(res[1])) + } + + if (res[1][0] != "b" && res[1][1] != "b") || (res[1][0] != "c" && res[1][1] != "c") { + t.Fatalf("Expected [b, c], found %v instead", res[1]) + } + + if len(res[2]) != 1 || res[2][0] != "d" { + t.Fatalf("Expected [d], found %v instead", res[2]) + } +} + +func TestParsePortMapping(t *testing.T) { + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") + if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} + +func TestGetNameserversAsCIDR(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, + } { + test := GetNameserversAsCIDR([]byte(resolv)) + if !StrSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func StrSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_windows.go new file mode 100644 index 00000000..a6815a49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/utils/utils_windows.go @@ -0,0 +1,17 @@ +// Copyright 2014 Docker authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the DOCKER-LICENSE file. + +package utils + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is not available on windows") +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md new file mode 100644 index 00000000..2d849dfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md @@ -0,0 +1,2 @@ +influxdb-go +=========== diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go new file mode 100644 index 00000000..993076a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go @@ -0,0 +1,610 @@ +package client + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" +) + +const ( + UDPMaxMessageSize = 2048 +) + +type Client struct { + host string + username string + password string + database string + httpClient *http.Client + udpConn *net.UDPConn + schema string + compression bool +} + +type ClientConfig struct { + Host string + Username string + Password string + Database string + HttpClient *http.Client + IsSecure bool + IsUDP bool +} + +var defaults *ClientConfig + +func init() { + defaults = &ClientConfig{ + Host: "localhost:8086", + Username: "root", + Password: "root", + Database: "", + HttpClient: http.DefaultClient, + } +} + +func getDefault(value, defaultValue string) string { + if value == "" { + return defaultValue + } + return value +} + +func New(config *ClientConfig) (*Client, error) { + return NewClient(config) +} + +func NewClient(config *ClientConfig) (*Client, error) { + host := getDefault(config.Host, defaults.Host) + username := getDefault(config.Username, defaults.Username) + password := getDefault(config.Password, defaults.Password) + database := getDefault(config.Database, defaults.Database) + if config.HttpClient == nil { + config.HttpClient = defaults.HttpClient + } + var udpConn *net.UDPConn + if config.IsUDP { + serverAddr, err := net.ResolveUDPAddr("udp", host) + if err != nil { + return nil, err + } + udpConn, err = net.DialUDP("udp", nil, serverAddr) + if err != nil { + return nil, err + } + } + + schema := "http" + if config.IsSecure { + schema = "https" + } + return &Client{host, username, password, database, config.HttpClient, udpConn, schema, false}, nil +} + +func (self *Client) DisableCompression() { + self.compression = false +} + +func (self *Client) getUrl(path string) string { + return self.getUrlWithUserAndPass(path, self.username, self.password) +} + +func (self *Client) getUrlWithUserAndPass(path, username, password string) string { + return fmt.Sprintf("%s://%s%s?u=%s&p=%s", self.schema, self.host, path, username, password) +} + +func responseToError(response *http.Response, err error, closeResponse bool) error { + if err != nil { + return err + } + if closeResponse { + defer response.Body.Close() + } + if response.StatusCode >= 200 && response.StatusCode < 300 { + return nil + } + defer response.Body.Close() + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return err + } + return fmt.Errorf("Server returned (%d): %s", response.StatusCode, string(body)) +} + +func (self *Client) CreateDatabase(name string) 
error { + url := self.getUrl("/db") + payload := map[string]string{"name": name} + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +func (self *Client) del(url string) (*http.Response, error) { + return self.delWithBody(url, nil) +} + +func (self *Client) delWithBody(url string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, body) + if err != nil { + return nil, err + } + return self.httpClient.Do(req) +} + +func (self *Client) DeleteDatabase(name string) error { + url := self.getUrl("/db/" + name) + resp, err := self.del(url) + return responseToError(resp, err, true) +} + +func (self *Client) get(url string) ([]byte, error) { + resp, err := self.httpClient.Get(url) + err = responseToError(resp, err, false) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + return body, err +} + +func (self *Client) getWithVersion(url string) ([]byte, string, error) { + resp, err := self.httpClient.Get(url) + err = responseToError(resp, err, false) + if err != nil { + return nil, "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + version := resp.Header.Get("X-Influxdb-Version") + fields := strings.Fields(version) + if len(fields) > 2 { + return body, fields[1], err + } + return body, "", err +} + +func (self *Client) listSomething(url string) ([]map[string]interface{}, error) { + body, err := self.get(url) + if err != nil { + return nil, err + } + somethings := []map[string]interface{}{} + err = json.Unmarshal(body, &somethings) + if err != nil { + return nil, err + } + return somethings, nil +} + +func (self *Client) GetDatabaseList() ([]map[string]interface{}, error) { + url := self.getUrl("/db") + return self.listSomething(url) +} + +func (self *Client) CreateClusterAdmin(name, password string) error { + url := self.getUrl("/cluster_admins") + payload := map[string]string{"name": name, "password": password} + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +func (self *Client) UpdateClusterAdmin(name, password string) error { + url := self.getUrl("/cluster_admins/" + name) + payload := map[string]string{"password": password} + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +func (self *Client) DeleteClusterAdmin(name string) error { + url := self.getUrl("/cluster_admins/" + name) + resp, err := self.del(url) + return responseToError(resp, err, true) +} + +func (self *Client) GetClusterAdminList() ([]map[string]interface{}, error) { + url := self.getUrl("/cluster_admins") + return self.listSomething(url) +} + +func (self *Client) Servers() ([]map[string]interface{}, error) { + url := self.getUrl("/cluster/servers") + return self.listSomething(url) +} + +func (self *Client) RemoveServer(id int) error { + resp, err := self.del(self.getUrl(fmt.Sprintf("/cluster/servers/%d", id))) + return responseToError(resp, err, true) +} + +// Creates a new database user for the given database. permissions can +// be omitted in which case the user will be able to read and write to +// all time series. 
If provided, there should be two strings, the +// first for read and the second for write. The strings are regexes +// that are used to match the time series name to determine whether +// the user has the ability to read/write to the given time series. +// +// client.CreateDatabaseUser("db", "user", "pass") +// // the following user cannot read from any series and can write +// // to the limited time series only +// client.CreateDatabaseUser("db", "limited", "pass", "^$", "limited") +func (self *Client) CreateDatabaseUser(database, name, password string, permissions ...string) error { + readMatcher, writeMatcher := ".*", ".*" + switch len(permissions) { + case 0: + case 2: + readMatcher, writeMatcher = permissions[0], permissions[1] + default: + return fmt.Errorf("You have to provide two ") + } + + url := self.getUrl("/db/" + database + "/users") + payload := map[string]string{"name": name, "password": password, "readFrom": readMatcher, "writeTo": writeMatcher} + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +// Change the cluster admin password +func (self *Client) ChangeClusterAdminPassword(name, newPassword string) error { + url := self.getUrl("/cluster_admins/" + name) + payload := map[string]interface{}{"password": newPassword} + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +// Change the user password, adming flag and optionally permissions +func (self *Client) ChangeDatabaseUser(database, name, newPassword string, isAdmin bool, newPermissions ...string) error { + switch len(newPermissions) { + case 0, 2: + default: + return fmt.Errorf("You have to provide two ") + } + + url := self.getUrl("/db/" + database + "/users/" + name) + payload := map[string]interface{}{"password": newPassword, "admin": isAdmin} + if len(newPermissions) == 2 { + payload["readFrom"] = newPermissions[0] + payload["writeTo"] = newPermissions[1] + } + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +// See Client.CreateDatabaseUser for more info on the permissions +// argument +func (self *Client) updateDatabaseUserCommon(database, name string, password *string, isAdmin *bool, permissions ...string) error { + url := self.getUrl("/db/" + database + "/users/" + name) + payload := map[string]interface{}{} + if password != nil { + payload["password"] = *password + } + if isAdmin != nil { + payload["admin"] = *isAdmin + } + switch len(permissions) { + case 0: + case 2: + payload["readFrom"] = permissions[0] + payload["writeTo"] = permissions[1] + default: + } + + data, err := json.Marshal(payload) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +func (self *Client) UpdateDatabaseUser(database, name, password string) error { + return self.updateDatabaseUserCommon(database, name, &password, nil) +} + +func (self *Client) UpdateDatabaseUserPermissions(database, name, readPermission, writePermissions string) error { + return self.updateDatabaseUserCommon(database, name, nil, nil, readPermission, writePermissions) +} + +func (self *Client) 
DeleteDatabaseUser(database, name string) error { + url := self.getUrl("/db/" + database + "/users/" + name) + resp, err := self.del(url) + return responseToError(resp, err, true) +} + +func (self *Client) GetDatabaseUserList(database string) ([]map[string]interface{}, error) { + url := self.getUrl("/db/" + database + "/users") + return self.listSomething(url) +} + +func (self *Client) AlterDatabasePrivilege(database, name string, isAdmin bool, permissions ...string) error { + return self.updateDatabaseUserCommon(database, name, nil, &isAdmin, permissions...) +} + +type TimePrecision string + +const ( + Second TimePrecision = "s" + Millisecond TimePrecision = "m" + Microsecond TimePrecision = "u" +) + +func (self *Client) WriteSeries(series []*Series) error { + return self.writeSeriesCommon(series, nil) +} + +func (self *Client) WriteSeriesOverUDP(series []*Series) error { + if self.udpConn == nil { + return fmt.Errorf("UDP isn't enabled. Make sure to set config.IsUDP to true") + } + + data, err := json.Marshal(series) + if err != nil { + return err + } + // because max of msg over upd is 2048 bytes + // https://github.com/influxdb/influxdb/blob/master/src/api/udp/api.go#L65 + if len(data) >= UDPMaxMessageSize { + err = fmt.Errorf("data size over limit %v limit is %v", len(data), UDPMaxMessageSize) + fmt.Println(err) + return err + } + _, err = self.udpConn.Write(data) + if err != nil { + return err + } + return nil +} + +func (self *Client) WriteSeriesWithTimePrecision(series []*Series, timePrecision TimePrecision) error { + return self.writeSeriesCommon(series, map[string]string{"time_precision": string(timePrecision)}) +} + +func (self *Client) writeSeriesCommon(series []*Series, options map[string]string) error { + data, err := json.Marshal(series) + if err != nil { + return err + } + url := self.getUrl("/db/" + self.database + "/series") + for name, value := range options { + url += fmt.Sprintf("&%s=%s", name, value) + } + var b *bytes.Buffer + if self.compression { + b = bytes.NewBuffer(nil) + w := gzip.NewWriter(b) + if _, err := w.Write(data); err != nil { + return err + } + w.Flush() + w.Close() + } else { + b = bytes.NewBuffer(data) + } + req, err := http.NewRequest("POST", url, b) + if err != nil { + return err + } + if self.compression { + req.Header.Set("Content-Encoding", "gzip") + } + resp, err := self.httpClient.Do(req) + return responseToError(resp, err, true) +} + +func (self *Client) Query(query string, precision ...TimePrecision) ([]*Series, error) { + return self.queryCommon(query, false, precision...) +} + +func (self *Client) QueryWithNumbers(query string, precision ...TimePrecision) ([]*Series, error) { + return self.queryCommon(query, true, precision...) 
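+	// Descriptive note: queryCommon (below) calls decoder.UseNumber when
+	// useNumber is true, so numeric fields decode as json.Number rather
+	// than float64, preserving integer precision.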
+} + +func (self *Client) queryCommon(query string, useNumber bool, precision ...TimePrecision) ([]*Series, error) { + escapedQuery := url.QueryEscape(query) + url := self.getUrl("/db/" + self.database + "/series") + if len(precision) > 0 { + url += "&time_precision=" + string(precision[0]) + } + url += "&q=" + escapedQuery + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if !self.compression { + req.Header.Set("Accept-Encoding", "identity") + } + resp, err := self.httpClient.Do(req) + err = responseToError(resp, err, false) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + series := []*Series{} + decoder := json.NewDecoder(resp.Body) + if useNumber { + decoder.UseNumber() + } + err = decoder.Decode(&series) + if err != nil { + return nil, err + } + return series, nil +} + +func (self *Client) Ping() error { + url := self.getUrl("/ping") + resp, err := self.httpClient.Get(url) + return responseToError(resp, err, true) +} + +func (self *Client) AuthenticateDatabaseUser(database, username, password string) error { + url := self.getUrlWithUserAndPass(fmt.Sprintf("/db/%s/authenticate", database), username, password) + resp, err := self.httpClient.Get(url) + return responseToError(resp, err, true) +} + +func (self *Client) AuthenticateClusterAdmin(username, password string) error { + url := self.getUrlWithUserAndPass("/cluster_admins/authenticate", username, password) + resp, err := self.httpClient.Get(url) + return responseToError(resp, err, true) +} + +func (self *Client) GetContinuousQueries() ([]map[string]interface{}, error) { + url := self.getUrlWithUserAndPass(fmt.Sprintf("/db/%s/continuous_queries", self.database), self.username, self.password) + return self.listSomething(url) +} + +func (self *Client) DeleteContinuousQueries(id int) error { + url := self.getUrlWithUserAndPass(fmt.Sprintf("/db/%s/continuous_queries/%d", self.database, id), self.username, self.password) + resp, err := self.del(url) + return responseToError(resp, err, true) +} + +type LongTermShortTermShards struct { + // Long term shards, (doesn't get populated for version >= 0.8.0) + LongTerm []*Shard `json:"longTerm"` + // Short term shards, (doesn't get populated for version >= 0.8.0) + ShortTerm []*Shard `json:"shortTerm"` + // All shards in the system (Long + Short term shards for version < 0.8.0) + All []*Shard `json:"-"` +} + +type Shard struct { + Id uint32 `json:"id"` + EndTime int64 `json:"endTime"` + StartTime int64 `json:"startTime"` + ServerIds []uint32 `json:"serverIds"` + SpaceName string `json:"spaceName"` + Database string `json:"database"` +} + +type ShardSpaceCollection struct { + ShardSpaces []ShardSpace +} + +func (self *Client) GetShards() (*LongTermShortTermShards, error) { + url := self.getUrlWithUserAndPass("/cluster/shards", self.username, self.password) + body, version, err := self.getWithVersion(url) + if err != nil { + return nil, err + } + return parseShards(body, version) +} + +func isOrNewerThan(version, reference string) bool { + if version == "vdev" { + return true + } + majorMinor := strings.Split(version[1:], ".")[:2] + refMajorMinor := strings.Split(reference[1:], ".")[:2] + if majorMinor[0] > refMajorMinor[0] { + return true + } + if majorMinor[1] > refMajorMinor[1] { + return true + } + return majorMinor[1] == refMajorMinor[1] +} + +func parseShards(body []byte, version string) (*LongTermShortTermShards, error) { + // strip the initial v in `v0.8.0` and split on the dots + if version != "" && isOrNewerThan(version, "v0.8") 
{ + return parseNewShards(body) + } + shards := &LongTermShortTermShards{} + err := json.Unmarshal(body, &shards) + if err != nil { + return nil, err + } + + shards.All = make([]*Shard, len(shards.LongTerm)+len(shards.ShortTerm)) + copy(shards.All, shards.LongTerm) + copy(shards.All[len(shards.LongTerm):], shards.ShortTerm) + return shards, nil +} + +func parseNewShards(body []byte) (*LongTermShortTermShards, error) { + shards := []*Shard{} + err := json.Unmarshal(body, &shards) + if err != nil { + return nil, err + } + + return &LongTermShortTermShards{All: shards}, nil +} + +// Added to InfluxDB in 0.8.0 +func (self *Client) GetShardSpaces() ([]*ShardSpace, error) { + url := self.getUrlWithUserAndPass("/cluster/shard_spaces", self.username, self.password) + body, err := self.get(url) + if err != nil { + return nil, err + } + spaces := []*ShardSpace{} + err = json.Unmarshal(body, &spaces) + if err != nil { + return nil, err + } + + return spaces, nil +} + +// Added to InfluxDB in 0.8.0 +func (self *Client) DropShardSpace(database, name string) error { + url := self.getUrlWithUserAndPass(fmt.Sprintf("/cluster/shard_spaces/%s/%s", database, name), self.username, self.password) + _, err := self.del(url) + return err +} + +// Added to InfluxDB in 0.8.0 +func (self *Client) CreateShardSpace(space *ShardSpace) error { + url := self.getUrl(fmt.Sprintf("/cluster/shard_spaces")) + data, err := json.Marshal(space) + if err != nil { + return err + } + resp, err := self.httpClient.Post(url, "application/json", bytes.NewBuffer(data)) + return responseToError(resp, err, true) +} + +func (self *Client) DropShard(id uint32, serverIds []uint32) error { + url := self.getUrlWithUserAndPass(fmt.Sprintf("/cluster/shards/%d", id), self.username, self.password) + ids := map[string][]uint32{"serverIds": serverIds} + body, err := json.Marshal(ids) + if err != nil { + return err + } + _, err = self.delWithBody(url, bytes.NewBuffer(body)) + return err +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go new file mode 100644 index 00000000..0e3b5d84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go @@ -0,0 +1,190 @@ +package client + +import ( + "testing" +) + +func TestClient(t *testing.T) { + internalTest(t, true) +} + +func TestClientWithoutCompression(t *testing.T) { + internalTest(t, false) +} + +func internalTest(t *testing.T, compression bool) { + client, err := NewClient(&ClientConfig{}) + if err != nil { + t.Error(err) + } + + admins, err := client.GetClusterAdminList() + if err != nil { + t.Error(err) + } + + if len(admins) == 1 { + if err := client.CreateClusterAdmin("admin", "password"); err != nil { + t.Error(err) + } + } + + admins, err = client.GetClusterAdminList() + if err != nil { + t.Error(err) + } + + if len(admins) != 2 { + t.Error("more than two admins returned") + } + + dbs, err := client.GetDatabaseList() + if err != nil { + t.Error(err) + } + + if len(dbs) == 0 { + if err := client.CreateDatabase("foobar"); err != nil { + t.Error(err) + } + } + + dbs, err = client.GetDatabaseList() + if err != nil { + t.Error(err) + } + + if len(dbs) != 1 && dbs[0]["foobar"] == nil { + t.Errorf("List of databases don't match") + } + + users, err := client.GetDatabaseUserList("foobar") + if err != nil { + t.Error(err) + } + + if len(users) == 0 { + if err := client.CreateDatabaseUser("foobar", "dbuser", "pass"); err != nil { + t.Error(err) + } + + if 
err := client.AlterDatabasePrivilege("foobar", "dbuser", true); err != nil { + t.Error(err) + } + } + + users, err = client.GetDatabaseUserList("foobar") + if err != nil { + t.Error(err) + } + + if len(users) != 1 { + t.Error("more than one user returned") + } + + client, err = NewClient(&ClientConfig{ + Username: "dbuser", + Password: "pass", + Database: "foobar", + }) + + if !compression { + client.DisableCompression() + } + + if err != nil { + t.Error(err) + } + + name := "ts9" + if !compression { + name = "ts9_uncompressed" + } + + series := &Series{ + Name: name, + Columns: []string{"value"}, + Points: [][]interface{}{ + {1.0}, + }, + } + if err := client.WriteSeries([]*Series{series}); err != nil { + t.Error(err) + } + + result, err := client.Query("select * from " + name) + if err != nil { + t.Error(err) + } + + if len(result) != 1 { + t.Fatalf("expected one time series returned: %d", len(result)) + } + + if len(result[0].Points) != 1 { + t.Error("Expected one point: ", len(result[0].Points)) + } + + if result[0].Points[0][2].(float64) != 1 { + t.Fail() + } + + client, err = NewClient(&ClientConfig{ + Username: "root", + Password: "root", + }) + + if err != nil { + t.Error(err) + } + + spaces, err := client.GetShardSpaces() + if err != nil || len(spaces) == 0 { + t.Fail() + } + if spaces[0].Name != "default" { + t.Fail() + } + space := &ShardSpace{Name: "foo", Database: "foobar", Regex: "/^paul_is_rad/"} + err = client.CreateShardSpace(space) + if err != nil { + t.Error(err) + } + spaces, _ = client.GetShardSpaces() + if spaces[1].Name != "foo" { + t.Fail() + } + shards, err := client.GetShards() + if err != nil { + t.Fail() + } + + client.database = "foobar" + series = &Series{ + Name: "paul_is_rad", + Columns: []string{"value"}, + Points: [][]interface{}{ + {1.0}, + }, + } + if err := client.WriteSeries([]*Series{series}); err != nil { + t.Error(err) + } + + spaces, _ = client.GetShardSpaces() + count := 0 + for _, s := range shards.All { + if s.SpaceName == "foo" { + count++ + } + } + + if err := client.DropShardSpace("foobar", "foo"); err != nil { + t.Fail() + } + + spaces, err = client.GetShardSpaces() + if err != nil || len(spaces) != 1 || spaces[0].Name != "default" { + t.Fail() + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go new file mode 100644 index 00000000..f18b8bbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/series.go @@ -0,0 +1,19 @@ +package client + +type Series struct { + Name string `json:"name"` + Columns []string `json:"columns"` + Points [][]interface{} `json:"points"` +} + +func (self *Series) GetName() string { + return self.Name +} + +func (self *Series) GetColumns() []string { + return self.Columns +} + +func (self *Series) GetPoints() [][]interface{} { + return self.Points +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go new file mode 100644 index 00000000..87dea117 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/shard_space.go @@ -0,0 +1,15 @@ +package client + +type ShardSpace struct { + // required, must be unique within the database + Name string `json:"name"` + // required, a database has many shard spaces and a shard space belongs to a database + Database string `json:"database"` + // this is optional, if they don't set it, we'll set to /.*/ + Regex string 
`json:"regex"` + // this is optional, if they don't set it, it will default to the storage.dir in the config + RetentionPolicy string `json:"retentionPolicy"` + ShardDuration string `json:"shardDuration"` + ReplicationFactor uint32 `json:"replicationFactor"` + Split uint32 `json:"split"` +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/.gitignore b/Godeps/_workspace/src/github.com/kr/pretty/.gitignore new file mode 100644 index 00000000..1f0a99f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/Godeps/_workspace/src/github.com/kr/pretty/License b/Godeps/_workspace/src/github.com/kr/pretty/License new file mode 100644 index 00000000..05c783cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/kr/pretty/Readme b/Godeps/_workspace/src/github.com/kr/pretty/Readme new file mode 100644 index 00000000..c589fc62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/Readme @@ -0,0 +1,9 @@ +package pretty + + import "github.com/kr/pretty" + + Package pretty provides pretty-printing for Go values. + +Documentation + + http://godoc.org/github.com/kr/pretty diff --git a/Godeps/_workspace/src/github.com/kr/pretty/diff.go b/Godeps/_workspace/src/github.com/kr/pretty/diff.go new file mode 100644 index 00000000..64fac640 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/diff.go @@ -0,0 +1,148 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (s *sbuf) Write(b []byte) (int, error) { + *s = append(*s, string(b)) + return len(b), nil +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Fdiff((*sbuf)(&desc), a, b) + return desc +} + +// Fdiff writes to w a description of the differences between a and b. +func Fdiff(w io.Writer, a, b interface{}) { + diffWriter{w: w}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type diffWriter struct { + w io.Writer + l string // label +} + +func (w diffWriter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + fmt.Fprintf(w.w, l+f, a...) 
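+	// The label l (built up by relabel below) prefixes each reported
+	// difference, e.g. "S.A: 0 != 1" in this package's diff tests.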
+} + +func (w diffWriter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %#v", bv.Interface()) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%#v != nil", av.Interface()) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + // numeric types, including bool + if at.Kind() < reflect.Array { + a, b := av.Interface(), bv.Interface() + if a != b { + w.printf("%#v != %#v", a, b) + } + return + } + + switch at.Kind() { + case reflect.String: + a, b := av.Interface(), bv.Interface() + if a != b { + w.printf("%q != %q", a, b) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %v", bv.Interface()) + case !av.IsNil() && bv.IsNil(): + w.printf("%v != nil", av.Interface()) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k.Interface())) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k.Interface())) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k.Interface())) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Interface: + w.diff(reflect.ValueOf(av.Interface()), reflect.ValueOf(bv.Interface())) + default: + if !reflect.DeepEqual(av.Interface(), bv.Interface()) { + w.printf("%# v != %# v", Formatter(av.Interface()), Formatter(bv.Interface())) + } + } +} + +func (d diffWriter) relabel(name string) (d1 diffWriter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." 
+ } + d1.l += name + return d1 +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if reflect.DeepEqual(av.Interface(), bv.Interface()) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if reflect.DeepEqual(av.Interface(), bv.Interface()) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/diff_test.go b/Godeps/_workspace/src/github.com/kr/pretty/diff_test.go new file mode 100644 index 00000000..02d19535 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/diff_test.go @@ -0,0 +1,73 @@ +package pretty + +import ( + "testing" +) + +type difftest struct { + a interface{} + b interface{} + exp []string +} + +type S struct { + A int + S *S + I interface{} + C []int +} + +var diffs = []difftest{ + {a: nil, b: nil}, + {a: S{A: 1}, b: S{A: 1}}, + + {0, "", []string{`int != string`}}, + {0, 1, []string{`0 != 1`}}, + {S{}, new(S), []string{`pretty.S != *pretty.S`}}, + {"a", "b", []string{`"a" != "b"`}}, + {S{}, S{A: 1}, []string{`A: 0 != 1`}}, + {new(S), &S{A: 1}, []string{`A: 0 != 1`}}, + {S{S: new(S)}, S{S: &S{A: 1}}, []string{`S.A: 0 != 1`}}, + {S{}, S{I: 0}, []string{`I: nil != 0`}}, + {S{I: 1}, S{I: "x"}, []string{`I: int != string`}}, + {S{}, S{C: []int{1}}, []string{`C: []int(nil) != []int{1}`}}, + {S{C: []int{}}, S{C: []int{1}}, []string{`C: []int{} != []int{1}`}}, + {S{}, S{A: 1, S: new(S)}, []string{`A: 0 != 1`, `S: nil != &{0 []}`}}, +} + +func TestDiff(t *testing.T) { + for _, tt := range diffs { + got := Diff(tt.a, tt.b) + eq := len(got) == len(tt.exp) + if eq { + for i := range got { + eq = eq && got[i] == tt.exp[i] + } + } + if !eq { + t.Errorf("diffing % #v", tt.a) + t.Errorf("with % #v", tt.b) + diffdiff(t, got, tt.exp) + continue + } + } +} + +func diffdiff(t *testing.T, got, exp []string) { + minus(t, "unexpected:", got, exp) + minus(t, "missing:", exp, got) +} + +func minus(t *testing.T, s string, a, b []string) { + var i, j int + for i = 0; i < len(a); i++ { + for j = 0; j < len(b); j++ { + if a[i] == b[j] { + break + } + } + if j == len(b) { + t.Error(s, a[i]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/example_test.go b/Godeps/_workspace/src/github.com/kr/pretty/example_test.go new file mode 100644 index 00000000..ecf40f3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/example_test.go @@ -0,0 +1,20 @@ +package pretty_test + +import ( + "fmt" + "github.com/kr/pretty" +) + +func Example() { + type myType struct { + a, b int + } + var x = []myType{{1, 2}, {3, 4}, {5, 6}} + fmt.Printf("%# v", pretty.Formatter(x)) + // output: + // []pretty_test.myType{ + // {a:1, b:2}, + // {a:3, b:4}, + // {a:5, b:6}, + // } +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/formatter.go b/Godeps/_workspace/src/github.com/kr/pretty/formatter.go new file mode 100644 index 00000000..c834d464 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/formatter.go @@ -0,0 +1,309 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +const ( + limit = 50 +) + +type formatter struct { + x interface{} + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. 
Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. +func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{x: x, quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.x) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(i) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.x) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w} + p.printValue(reflect.ValueOf(fo.x), true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + showTypeInStruct := true + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + 
io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + p.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + writeByte(p, '&') + p.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " {...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func tryDeepEqual(a, b interface{}) bool { + defer func() { recover() }() + return reflect.DeepEqual(a, b) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/formatter_test.go b/Godeps/_workspace/src/github.com/kr/pretty/formatter_test.go new file mode 100644 index 00000000..ec9bbca6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/formatter_test.go @@ -0,0 +1,148 @@ +package pretty + +import ( + "fmt" + "io" + "testing" + "unsafe" +) + +type test struct { + v interface{} + s string +} + +type LongStructTypeName struct { + longFieldName interface{} + otherLongFieldName interface{} +} + +type SA struct { + t *T + v T +} + +type T struct { + x, y int +} + +type F int + +func (f 
F) Format(s fmt.State, c rune) { + fmt.Fprintf(s, "F(%d)", int(f)) +} + +var long = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +var gosyntax = []test{ + {nil, `nil`}, + {"", `""`}, + {"a", `"a"`}, + {1, "int(1)"}, + {1.0, "float64(1)"}, + {[]int(nil), "[]int(nil)"}, + {[0]int{}, "[0]int{}"}, + {complex(1, 0), "(1+0i)"}, + //{make(chan int), "(chan int)(0x1234)"}, + {unsafe.Pointer(uintptr(1)), "unsafe.Pointer(0x1)"}, + {func(int) {}, "func(int) {...}"}, + {map[int]int{1: 1}, "map[int]int{1:1}"}, + {int32(1), "int32(1)"}, + {io.EOF, `&errors.errorString{s:"EOF"}`}, + {[]string{"a"}, `[]string{"a"}`}, + { + []string{long}, + `[]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"}`, + }, + {F(5), "pretty.F(5)"}, + { + SA{&T{1, 2}, T{3, 4}}, + `pretty.SA{ + t: &pretty.T{x:1, y:2}, + v: pretty.T{x:3, y:4}, +}`, + }, + { + map[int][]byte{1: []byte{}}, + `map[int][]uint8{ + 1: {}, +}`, + }, + { + map[int]T{1: T{}}, + `map[int]pretty.T{ + 1: {}, +}`, + }, + { + long, + `"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"`, + }, + { + LongStructTypeName{ + longFieldName: LongStructTypeName{}, + otherLongFieldName: long, + }, + `pretty.LongStructTypeName{ + longFieldName: pretty.LongStructTypeName{}, + otherLongFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", +}`, + }, + { + &LongStructTypeName{ + longFieldName: &LongStructTypeName{}, + otherLongFieldName: (*LongStructTypeName)(nil), + }, + `&pretty.LongStructTypeName{ + longFieldName: &pretty.LongStructTypeName{}, + otherLongFieldName: (*pretty.LongStructTypeName)(nil), +}`, + }, + { + []LongStructTypeName{ + {nil, nil}, + {3, 3}, + {long, nil}, + }, + `[]pretty.LongStructTypeName{ + {}, + { + longFieldName: int(3), + otherLongFieldName: int(3), + }, + { + longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", + otherLongFieldName: nil, + }, +}`, + }, + { + []interface{}{ + LongStructTypeName{nil, nil}, + []byte{1, 2, 3}, + T{3, 4}, + LongStructTypeName{long, nil}, + }, + `[]interface {}{ + pretty.LongStructTypeName{}, + []uint8{0x1, 0x2, 0x3}, + pretty.T{x:3, y:4}, + pretty.LongStructTypeName{ + longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", + otherLongFieldName: nil, + }, +}`, + }, +} + +func TestGoSyntax(t *testing.T) { + for _, tt := range gosyntax { + s := fmt.Sprintf("%# v", Formatter(tt.v)) + if tt.s != s { + t.Errorf("expected %q", tt.s) + t.Errorf("got %q", s) + t.Errorf("expraw\n%s", tt.s) + t.Errorf("gotraw\n%s", s) + } + } +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/pretty.go b/Godeps/_workspace/src/github.com/kr/pretty/pretty.go new file mode 100644 index 00000000..d3df8686 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/pretty.go @@ -0,0 +1,98 @@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. +// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. 
+// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). +func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) 
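+	// wrap (below) substitutes a formatter for each operand; with force
+	// unset, pretty output still requires the "%# v" verb in the format string.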
+} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{x: x, force: force} + } + return w +} diff --git a/Godeps/_workspace/src/github.com/kr/pretty/zero.go b/Godeps/_workspace/src/github.com/kr/pretty/zero.go new file mode 100644 index 00000000..abb5b6fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/Godeps/_workspace/src/github.com/kr/text/License b/Godeps/_workspace/src/github.com/kr/text/License new file mode 100644 index 00000000..480a3280 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/kr/text/Readme b/Godeps/_workspace/src/github.com/kr/text/Readme new file mode 100644 index 00000000..7e6e7c06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/Readme @@ -0,0 +1,3 @@ +This is a Go package for manipulating paragraphs of text. + +See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme b/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme new file mode 100644 index 00000000..1c1f4e68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme @@ -0,0 +1,5 @@ +Package colwriter provides a write filter that formats +input lines in multiple columns. 
+ +The package is a straightforward translation from +/src/cmd/draw/mc.c in Plan 9 from User Space. diff --git a/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go b/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go new file mode 100644 index 00000000..7302ce9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go @@ -0,0 +1,147 @@ +// Package colwriter provides a write filter that formats +// input lines in multiple columns. +// +// The package is a straightforward translation from +// /src/cmd/draw/mc.c in Plan 9 from User Space. +package colwriter + +import ( + "bytes" + "io" + "unicode/utf8" +) + +const ( + tab = 4 +) + +const ( + // Print each input line ending in a colon ':' separately. + BreakOnColon uint = 1 << iota +) + +// A Writer is a filter that arranges input lines in as many columns as will +// fit in its width. Tab '\t' chars in the input are translated to sequences +// of spaces ending at multiples of 4 positions. +// +// If BreakOnColon is set, each input line ending in a colon ':' is written +// separately. +// +// The Writer assumes that all Unicode code points have the same width; this +// may not be true in some fonts. +type Writer struct { + w io.Writer + buf []byte + width int + flag uint +} + +// NewWriter allocates and initializes a new Writer writing to w. +// Parameter width controls the total number of characters on each line +// across all columns. +func NewWriter(w io.Writer, width int, flag uint) *Writer { + return &Writer{ + w: w, + width: width, + flag: flag, + } +} + +// Write writes p to the writer w. The only errors returned are ones +// encountered while writing to the underlying output stream. +func (w *Writer) Write(p []byte) (n int, err error) { + var linelen int + var lastWasColon bool + for i, c := range p { + w.buf = append(w.buf, c) + linelen++ + if c == '\t' { + w.buf[len(w.buf)-1] = ' ' + for linelen%tab != 0 { + w.buf = append(w.buf, ' ') + linelen++ + } + } + if w.flag&BreakOnColon != 0 && c == ':' { + lastWasColon = true + } else if lastWasColon { + if c == '\n' { + pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'}) + if pos < 0 { + pos = 0 + } + line := w.buf[pos:] + w.buf = w.buf[:pos] + if err = w.columnate(); err != nil { + if len(line) < i { + return i - len(line), err + } + return 0, err + } + if n, err := w.w.Write(line); err != nil { + if r := len(line) - n; r < i { + return i - r, err + } + return 0, err + } + } + lastWasColon = false + } + if c == '\n' { + linelen = 0 + } + } + return len(p), nil +} + +// Flush should be called after the last call to Write to ensure that any data +// buffered in the Writer is written to output. 
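+//
+// Illustrative usage sketch from a client package (the writer and the
+// 80-column width are arbitrary choices here, mirroring how the mc command
+// in this tree drives the Writer):
+//
+//	w := colwriter.NewWriter(os.Stdout, 80, 0)
+//	if _, err := io.Copy(w, os.Stdin); err != nil {
+//		log.Println(err)
+//	}
+//	if err := w.Flush(); err != nil {
+//		log.Println(err)
+//	}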
+func (w *Writer) Flush() error { + return w.columnate() +} + +func (w *Writer) columnate() error { + words := bytes.Split(w.buf, []byte{'\n'}) + w.buf = nil + if len(words[len(words)-1]) == 0 { + words = words[:len(words)-1] + } + maxwidth := 0 + for _, wd := range words { + if n := utf8.RuneCount(wd); n > maxwidth { + maxwidth = n + } + } + maxwidth++ // space char + wordsPerLine := w.width / maxwidth + if wordsPerLine <= 0 { + wordsPerLine = 1 + } + nlines := (len(words) + wordsPerLine - 1) / wordsPerLine + for i := 0; i < nlines; i++ { + col := 0 + endcol := 0 + for j := i; j < len(words); j += nlines { + endcol += maxwidth + _, err := w.w.Write(words[j]) + if err != nil { + return err + } + col += utf8.RuneCount(words[j]) + if j+nlines < len(words) { + for col < endcol { + _, err := w.w.Write([]byte{' '}) + if err != nil { + return err + } + col++ + } + } + } + _, err := w.w.Write([]byte{'\n'}) + if err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/kr/text/colwriter/column_test.go b/Godeps/_workspace/src/github.com/kr/text/colwriter/column_test.go new file mode 100644 index 00000000..8d0bf8f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/colwriter/column_test.go @@ -0,0 +1,90 @@ +package colwriter + +import ( + "bytes" + "testing" +) + +var src = ` +.git +.gitignore +.godir +Procfile: +README.md +api.go +apps.go +auth.go +darwin.go +data.go +dyno.go: +env.go +git.go +help.go +hkdist +linux.go +ls.go +main.go +plugin.go +run.go +scale.go +ssh.go +tail.go +term +unix.go +update.go +version.go +windows.go +`[1:] + +var tests = []struct{ + wid int + flag uint + src string + want string +}{ + {80, 0, "", ""}, + {80, 0, src, ` +.git README.md darwin.go git.go ls.go scale.go unix.go +.gitignore api.go data.go help.go main.go ssh.go update.go +.godir apps.go dyno.go: hkdist plugin.go tail.go version.go +Procfile: auth.go env.go linux.go run.go term windows.go +`[1:]}, + {80, BreakOnColon, src, ` +.git .gitignore .godir + +Procfile: +README.md api.go apps.go auth.go darwin.go data.go + +dyno.go: +env.go hkdist main.go scale.go term version.go +git.go linux.go plugin.go ssh.go unix.go windows.go +help.go ls.go run.go tail.go update.go +`[1:]}, + {20, 0, ` +Hello +Γειά σου +안녕 +今日は +`[1:], ` +Hello 안녕 +Γειά σου 今日は +`[1:]}, +} + +func TestWriter(t *testing.T) { + for _, test := range tests { + b := new(bytes.Buffer) + w := NewWriter(b, test.wid, test.flag) + if _, err := w.Write([]byte(test.src)); err != nil { + t.Error(err) + } + if err := w.Flush(); err != nil { + t.Error(err) + } + if g := b.String(); test.want != g { + t.Log("\n" + test.want) + t.Log("\n" + g) + t.Errorf("%q != %q", test.want, g) + } + } +} diff --git a/Godeps/_workspace/src/github.com/kr/text/doc.go b/Godeps/_workspace/src/github.com/kr/text/doc.go new file mode 100644 index 00000000..cf4c198f --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. +package text diff --git a/Godeps/_workspace/src/github.com/kr/text/indent.go b/Godeps/_workspace/src/github.com/kr/text/indent.go new file mode 100644 index 00000000..4ebac45c --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. 
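+//
+// For example (adapted from this package's tests):
+//
+//	Indent("The quick brown fox\njumps over the lazy\ndog.\n", "xxx")
+//	// "xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n"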
+func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. +func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. +func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/Godeps/_workspace/src/github.com/kr/text/indent_test.go b/Godeps/_workspace/src/github.com/kr/text/indent_test.go new file mode 100644 index 00000000..5c723eee --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/indent_test.go @@ -0,0 +1,119 @@ +package text + +import ( + "bytes" + "testing" +) + +type T struct { + inp, exp, pre string +} + +var tests = []T{ + { + "The quick brown fox\njumps over the lazy\ndog.\nBut not quickly.\n", + "xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\nxxxBut not quickly.\n", + "xxx", + }, + { + "The quick brown fox\njumps over the lazy\ndog.\n\nBut not quickly.", + "xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n\nxxxBut not quickly.", + "xxx", + }, +} + +func TestIndent(t *testing.T) { + for _, test := range tests { + got := Indent(test.inp, test.pre) + if got != test.exp { + t.Errorf("mismatch %q != %q", got, test.exp) + } + } +} + +type IndentWriterTest struct { + inp, exp string + pre []string +} + +var ts = []IndentWriterTest{ + { + ` +The quick brown fox +jumps over the lazy +dog. +But not quickly. +`[1:], + ` +xxxThe quick brown fox +xxxjumps over the lazy +xxxdog. +xxxBut not quickly. +`[1:], + []string{"xxx"}, + }, + { + ` +The quick brown fox +jumps over the lazy +dog. +But not quickly. +`[1:], + ` +xxaThe quick brown fox +xxxjumps over the lazy +xxxdog. +xxxBut not quickly. +`[1:], + []string{"xxa", "xxx"}, + }, + { + ` +The quick brown fox +jumps over the lazy +dog. +But not quickly. +`[1:], + ` +xxaThe quick brown fox +xxbjumps over the lazy +xxcdog. +xxxBut not quickly. +`[1:], + []string{"xxa", "xxb", "xxc", "xxx"}, + }, + { + ` +The quick brown fox +jumps over the lazy +dog. + +But not quickly.`[1:], + ` +xxaThe quick brown fox +xxxjumps over the lazy +xxxdog. +xxx +xxxBut not quickly.`[1:], + []string{"xxa", "xxx"}, + }, +} + +func TestIndentWriter(t *testing.T) { + for _, test := range ts { + b := new(bytes.Buffer) + pre := make([][]byte, len(test.pre)) + for i := range test.pre { + pre[i] = []byte(test.pre[i]) + } + w := NewIndentWriter(b, pre...) 
+ if _, err := w.Write([]byte(test.inp)); err != nil { + t.Error(err) + } + if got := b.String(); got != test.exp { + t.Errorf("mismatch %q != %q", got, test.exp) + t.Log(got) + t.Log(test.exp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/kr/text/mc/Readme b/Godeps/_workspace/src/github.com/kr/text/mc/Readme new file mode 100644 index 00000000..519ddc00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/mc/Readme @@ -0,0 +1,9 @@ +Command mc prints in multiple columns. + + Usage: mc [-] [-N] [file...] + +Mc splits the input into as many columns as will fit in N +print positions. If the output is a tty, the default N is +the number of characters in a terminal line; otherwise the +default N is 80. Under option - each input line ending in +a colon ':' is printed separately. diff --git a/Godeps/_workspace/src/github.com/kr/text/mc/mc.go b/Godeps/_workspace/src/github.com/kr/text/mc/mc.go new file mode 100644 index 00000000..00169a30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/mc/mc.go @@ -0,0 +1,62 @@ +// Command mc prints in multiple columns. +// +// Usage: mc [-] [-N] [file...] +// +// Mc splits the input into as many columns as will fit in N +// print positions. If the output is a tty, the default N is +// the number of characters in a terminal line; otherwise the +// default N is 80. Under option - each input line ending in +// a colon ':' is printed separately. +package main + +import ( + "github.com/kr/pty" + "github.com/kr/text/colwriter" + "io" + "log" + "os" + "strconv" +) + +func main() { + var width int + var flag uint + args := os.Args[1:] + for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' { + if len(args[0]) > 1 { + width, _ = strconv.Atoi(args[0][1:]) + } else { + flag |= colwriter.BreakOnColon + } + args = args[1:] + } + if width < 1 { + _, width, _ = pty.Getsize(os.Stdout) + } + if width < 1 { + width = 80 + } + + w := colwriter.NewWriter(os.Stdout, width, flag) + if len(args) > 0 { + for _, s := range args { + if f, err := os.Open(s); err == nil { + copyin(w, f) + f.Close() + } else { + log.Println(err) + } + } + } else { + copyin(w, os.Stdin) + } +} + +func copyin(w *colwriter.Writer, r io.Reader) { + if _, err := io.Copy(w, r); err != nil { + log.Println(err) + } + if err := w.Flush(); err != nil { + log.Println(err) + } +} diff --git a/Godeps/_workspace/src/github.com/kr/text/wrap.go b/Godeps/_workspace/src/github.com/kr/text/wrap.go new file mode 100644 index 00000000..ca885651 --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. 
+// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. +func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/Godeps/_workspace/src/github.com/kr/text/wrap_test.go b/Godeps/_workspace/src/github.com/kr/text/wrap_test.go new file mode 100644 index 00000000..90f065cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/kr/text/wrap_test.go @@ -0,0 +1,44 @@ +package text + +import ( + "bytes" + "testing" +) + +var text = "The quick brown fox jumps over the lazy dog." + +func TestWrap(t *testing.T) { + exp := [][]string{ + {"The", "quick", "brown", "fox"}, + {"jumps", "over", "the", "lazy", "dog."}, + } + words := bytes.Split([]byte(text), sp) + got := WrapWords(words, 1, 24, defaultPenalty) + if len(exp) != len(got) { + t.Fail() + } + for i := range exp { + if len(exp[i]) != len(got[i]) { + t.Fail() + } + for j := range exp[i] { + if exp[i][j] != string(got[i][j]) { + t.Fatal(i, exp[i][j], got[i][j]) + } + } + } +} + +func TestWrapNarrow(t *testing.T) { + exp := "The\nquick\nbrown\nfox\njumps\nover\nthe\nlazy\ndog." + if Wrap(text, 5) != exp { + t.Fail() + } +} + +func TestWrapOneLine(t *testing.T) { + exp := "The quick brown fox jumps over the lazy dog." + if Wrap(text, 500) != exp { + t.Fail() + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 00000000..21999458 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/README.md b/Godeps/_workspace/src/github.com/stretchr/objx/README.md new file mode 100644 index 00000000..4aa18068 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/README.md @@ -0,0 +1,3 @@ +# objx + + * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go new file mode 100644 index 00000000..721bcac7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. 
Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go new file mode 100644 index 00000000..ce5d8e4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go @@ -0,0 +1,145 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccessorsAccessGetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) + +} +func TestAccessorsAccessGetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessGetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": 
map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessGetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) + + assert.Panics(t, func() { + access(current, "names[2]", nil, false, true) + }) + assert.Nil(t, access(current, "names[2]", nil, false, false)) + +} + +func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsGet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + assert.Equal(t, "Tyler", current.Get("name").data) + +} + +func TestAccessorsAccessSetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + + access(current, "age", 29, true, true) + assert.Equal(t, current["age"], 29) + +} + +func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { + + current := map[string]interface{}{} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + +} + +func TestAccessorsAccessSetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + + access(current, "name.first", "Mat", true, true) + access(current, "name.last", "Ryer", true, true) + + assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessSetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + + access(current, "one.two.three.four", 5, true, true) + + assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessSetArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{"Tyler"}} + + access(current, "names[0]", "Mat", true, true) + + assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) + +} +func TestAccessorsAccessSetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + + access(current, "names[0].first", "Mat", true, true) + access(current, "names[0].last", "Ryer", true, true) + access(current, "names[1].first", "Captain", true, true) + access(current, "names[1].last", "Underpants", true, true) + + assert.Equal(t, "Mat", 
access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) + +} + +func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsSet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + current.Set("name", "Mat") + assert.Equal(t, "Mat", current.Get("name").data) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt new file mode 100644 index 00000000..30602347 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + return a[index] + } + } diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 00000000..379ffc3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + + + + Codegen + + + + + +

+ [index.html body: code-generator form with fields "Template" (Use {x} as a placeholder for each argument.), "Arguments (comma separated)" (One block per line), and "Output"; the surrounding markup was not preserved in this extract]
+ + + + + + + + diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 00000000..b396900b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. +func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := v.Collect{4}(func(index int, val {1}) interface{} 
{ + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 00000000..069d43d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go new file mode 100644 index 00000000..f9eb42a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. 
+ SignatureSeparator = "_" +) diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go new file mode 100644 index 00000000..9cdfa9f9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. +func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. 
This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go new file mode 100644 index 00000000..e9ccd298 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go @@ -0,0 +1,94 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConversionJSON(t *testing.T) { + + jsonString := `{"name":"Mat"}` + o := MustFromJSON(jsonString) + + result, err := o.JSON() + + if assert.NoError(t, err) { + assert.Equal(t, jsonString, result) + } + + assert.Equal(t, jsonString, o.MustJSON()) + +} + +func TestConversionJSONWithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustJSON() + }) + + _, err := o.JSON() + + assert.Error(t, err) + +} + +func TestConversionBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.Base64() + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) + +} + +func TestConversionBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustBase64() + }) + + _, err := o.Base64() + + assert.Error(t, err) + +} + +func TestConversionSignedBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.SignedBase64("key") + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) + +} + +func TestConversionSignedBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustSignedBase64("key") + }) + + _, err := o.SignedBase64("key") + + assert.Error(t, err) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go new file mode 100644 index 00000000..47bf85e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. +// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. +// +// Pattern +// +// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have saught the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... 
*/ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ... */ +// +// } +package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go new file mode 100644 index 00000000..27f7d904 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go @@ -0,0 +1,98 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +var fixtures = []struct { + // name is the name of the fixture (used for reporting + // failures) + name string + // data is the JSON data to be worked on + data string + // get is the argument(s) to pass to Get + get interface{} + // output is the expected output + output interface{} +}{ + { + name: "Simple get", + data: `{"name": "Mat"}`, + get: "name", + output: "Mat", + }, + { + name: "Get with dot notation", + data: `{"address": {"city": "Boulder"}}`, + get: "address.city", + output: "Boulder", + }, + { + name: "Deep get with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.two.three.four", + output: "hello", + }, + { + name: "Get missing with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.ten", + output: nil, + }, + { + name: "Get with array notation", + data: `{"tags": ["one", "two", "three"]}`, + get: "tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation", + data: `{"types": { "tags": ["one", "two", "three"]}}`, + get: "types.tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation - field after array", + data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, + get: "tags[1].name", + output: "two", + }, + { + name: "Complex get with array and dot notation", + data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, + get: "tags[0].list[0].one", + output: "pizza", + }, + { + name: "Get field from within string should be nil", + data: `{"name":"Tyler"}`, + get: "name.something", + output: nil, + }, + { + name: "Get field from within string (using array accessor) should be nil", + data: `{"numbers":["one", "two", "three"]}`, + get: "numbers[0].nope", + output: nil, + }, +} + +func TestFixtures(t *testing.T) { + + for _, fixture := range fixtures { + + m := MustFromJSON(fixture.data) + + // get the value + t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) + value := 
m.Get(fixture.get.(string)) + + // make sure it matches + assert.Equal(t, fixture.output, value.data, + "Get fixture \"%s\" failed: %v", fixture.name, fixture, + ) + + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map.go b/Godeps/_workspace/src/github.com/stretchr/objx/map.go new file mode 100644 index 00000000..eb6ed8e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key arugment is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. 
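For the non-panicking path, a minimal sketch (the JSON payload and field names are hypothetical) of how the returned error is expected to be handled:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m, err := objx.FromJSON(`{"name": "Mat", "city": "Boulder"}`)
	if err != nil {
		// Invalid JSON: FromJSON returns the Nil map plus the json.Unmarshal error.
		fmt.Println("bad payload:", err)
		return
	}
	// Str falls back to the supplied default when the key is missing or the wrong type.
	fmt.Println(m.Get("name").Str("unknown"), m.Get("city").Str("unknown"))
}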
+func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. 
+// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go new file mode 100644 index 00000000..6beb5067 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go @@ -0,0 +1,10 @@ +package objx + +var TestMap map[string]interface{} = map[string]interface{}{ + "name": "Tyler", + "address": map[string]interface{}{ + "city": "Salt Lake City", + "state": "UT", + }, + "numbers": []interface{}{"one", "two", "three", "four", "five"}, +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go new file mode 100644 index 00000000..1f8b45c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go @@ -0,0 +1,147 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +type Convertable struct { + name string +} + +func (c *Convertable) MSI() map[string]interface{} { + return map[string]interface{}{"name": c.name} +} + +type Unconvertable struct { + name string +} + +func TestMapCreation(t *testing.T) { + + o := New(nil) + assert.Nil(t, o) + + o = New("Tyler") + assert.Nil(t, o) + + unconvertable := &Unconvertable{name: "Tyler"} + o = New(unconvertable) + assert.Nil(t, o) + + convertable := &Convertable{name: "Tyler"} + o = New(convertable) + if assert.NotNil(t, convertable) { + assert.Equal(t, "Tyler", o["name"], "Tyler") + } + + o = MSI() + if assert.NotNil(t, o) { + assert.NotNil(t, o) + } + + o = MSI("name", "Tyler") + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, o["name"], "Tyler") + } + } + +} + +func TestMapMustFromJSONWithError(t *testing.T) { + + _, err := FromJSON(`"name":"Mat"}`) + assert.Error(t, err) + +} + +func TestMapFromJSON(t *testing.T) { + + o := MustFromJSON(`{"name":"Mat"}`) + + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, "Mat", o["name"]) + } + } + +} + +func TestMapFromJSONWithError(t *testing.T) { + + var m Map + + assert.Panics(t, func() { + m = MustFromJSON(`"name":"Mat"}`) + }) + + assert.Nil(t, m) + +} + +func TestMapFromBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=" + + o, err := FromBase64(base64String) + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") + +} + +func TestMapFromBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWFasd0In0=" + + _, err := FromBase64(base64String) + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromBase64(base64String) + }) + +} + +func TestMapFromSignedBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + o, err := FromSignedBase64(base64String, "key") + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") + +} + +func TestMapFromSignedBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + _, err := FromSignedBase64(base64String, "key") + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromSignedBase64(base64String, 
"key") + }) + +} + +func TestMapFromURLQuery(t *testing.T) { + + m, err := FromURLQuery("name=tyler&state=UT") + if assert.NoError(t, err) && assert.NotNil(t, m) { + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go new file mode 100644 index 00000000..b35c8639 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. 
+// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go new file mode 100644 index 00000000..e20ee23b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go @@ -0,0 +1,77 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestExclude(t *testing.T) { + + d := make(Map) + d["name"] = "Mat" + d["age"] = 29 + d["secret"] = "ABC" + + excluded := d.Exclude([]string{"secret"}) + + assert.Equal(t, d["name"], excluded["name"]) + assert.Equal(t, d["age"], excluded["age"]) + assert.False(t, excluded.Has("secret"), "secret should be excluded") + +} + +func TestCopy(t *testing.T) { + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + d1Obj := New(d1) + d2Obj := d1Obj.Copy() + + d2Obj["name"] = "Mat" + + assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") + assert.Equal(t, d2Obj.Get("name").Str(), "Mat") + +} + +func TestMerge(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.Merge(d1Obj) + + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Empty(t, dObj.Get("location").Str()) + +} + +func TestMergeHere(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.MergeHere(d1Obj) + + assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security.go b/Godeps/_workspace/src/github.com/stretchr/objx/security.go new file mode 100644 index 00000000..fdd6be9c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. 
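HashWithKey is the primitive behind SignedBase64 and FromSignedBase64 shown earlier in this patch; a minimal round-trip sketch (the key is made up):

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MSI("name", "Mat")

	// SignedBase64 emits base64(JSON) + "_" + HashWithKey(base64(JSON), key).
	signed, err := m.SignedBase64("secret-key")
	if err != nil {
		fmt.Println("sign failed:", err)
		return
	}

	// FromSignedBase64 recomputes the hash and rejects a tampered payload.
	decoded, err := objx.FromSignedBase64(signed, "secret-key")
	if err != nil {
		fmt.Println("verify failed:", err)
		return
	}
	fmt.Println(decoded.Get("name").Str())
}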
+func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go new file mode 100644 index 00000000..8f0898f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go @@ -0,0 +1,12 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHashWithKey(t *testing.T) { + + assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go new file mode 100644 index 00000000..5408c7fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go @@ -0,0 +1,41 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSimpleExample(t *testing.T) { + + // build a map from a JSON object + o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) + + // Map can be used as a straight map[string]interface{} + assert.Equal(t, o["name"], "Mat") + + // Get an Value object + v := o.Get("name") + assert.Equal(t, v, &Value{data: "Mat"}) + + // Test the contained value + assert.False(t, v.IsInt()) + assert.False(t, v.IsBool()) + assert.True(t, v.IsStr()) + + // Get the contained value + assert.Equal(t, v.Str(), "Mat") + + // Get a default value if the contained value is not of the expected type or does not exist + assert.Equal(t, 1, v.Int(1)) + + // Get a value by using array notation + assert.Equal(t, "indian", o.Get("foods[0]").Data()) + + // Set a value by using array notation + o.Set("foods[0]", "italian") + assert.Equal(t, "italian", o.Get("foods[0]").Str()) + + // Get a value by using dot notation + assert.Equal(t, "hobbiton", o.Get("location.county").Str()) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go new file mode 100644 index 00000000..d9e0b479 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go new file mode 100644 index 00000000..bcc1eb03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go @@ -0,0 +1,24 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHas(t *testing.T) { + + m := New(TestMap) + + assert.True(t, m.Has("name")) + assert.True(t, m.Has("address.state")) + assert.True(t, m.Has("numbers[4]")) + + assert.False(t, m.Has("address.state.nope")) + assert.False(t, m.Has("address.nope")) + assert.False(t, m.Has("nope")) + assert.False(t, m.Has("numbers[5]")) + + m = nil + assert.False(t, m.Has("nothing")) + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 00000000..f3ecb29b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. 
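The Each/Where/Group/Replace/Collect helpers above follow one generated template per element type. Note that, as generated here, the Where* variants append the values for which the decider returns false, so the result actually holds the rejected items rather than the selected ones described in the doc comment. A small usage sketch for the interface{} family, with hypothetical data and the same import-path assumption as above:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.New(map[string]interface{}{
		"items": []interface{}{1, "two", 3.5},
	}).Get("items")

	// GroupInter buckets the mixed values by whatever key the grouper returns;
	// the resulting Value wraps a map[string][]interface{}.
	byType := v.GroupInter(func(i int, val interface{}) string {
		return fmt.Sprintf("%T", val)
	})
	groups := byType.Data().(map[string][]interface{})
	fmt.Println(groups["int"], groups["string"], groups["float64"]) // [1] [two] [3.5]

	// CollectInter derives a fresh []interface{} from the items.
	labels := v.CollectInter(func(i int, val interface{}) interface{} {
		return fmt.Sprintf("%d:%v", i, val)
	})
	fmt.Println(labels.MustInterSlice()) // [0:1 1:two 2:3.5]
}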
+func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). 
+func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. 
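ObjxMap above only succeeds when the stored value already is an objx Map (the type assertion does not match other map types and falls back to New(nil)), while Bool takes an optional default for missing or mistyped values. A hedged sketch with hypothetical keys, under the same import assumption:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Nested value stored as an objx.Map, so ObjxMap's type assertion holds.
	cfg := objx.New(map[string]interface{}{
		"limits": objx.Map{"cpu": 2, "privileged": false},
	})
	limits := cfg.Get("limits").ObjxMap()
	fmt.Println(limits.Get("cpu").Int()) // 2

	// Bool returns the stored bool, or the optional default when the value
	// is missing or of the wrong type.
	fmt.Println(limits.Get("privileged").Bool())   // false
	fmt.Println(limits.Get("readonly").Bool(true)) // true, via the default
}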
+func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. 
+// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
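The string family mirrors the same pattern: ReplaceStr maps each element to a new []string, while CollectStr may produce values of any type. A short illustration with made-up tags, same import assumption:

package main

import (
	"fmt"
	"strings"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.New(map[string]interface{}{
		"tags": []string{"alpha", "Beta", "gamma"},
	}).Get("tags")

	// ReplaceStr rewrites each element, yielding a []string of the same length.
	upper := v.ReplaceStr(func(i int, s string) string { return strings.ToUpper(s) })
	fmt.Println(upper.MustStrSlice()) // [ALPHA BETA GAMMA]

	// CollectStr gathers one value per element into a []interface{}.
	lengths := v.CollectStr(func(i int, s string) interface{} { return len(s) })
	fmt.Println(lengths.MustInterSlice()) // [5 4 5]
}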
+func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
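One practical caveat with the strict accessors above: encoding/json decodes every JSON number as float64, so Int on a freshly parsed document falls back to its default while Float64 (defined further down in this file) sees the value; the int helpers apply directly to Go-native ints. A sketch with hypothetical keys, same import assumption:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MustFromJSON(`{"port": 8080}`)
	fmt.Println(m.Get("port").Int(-1))   // -1: the data is a float64, not an int
	fmt.Println(m.Get("port").Float64()) // 8080

	// Go-native ints satisfy the assertions, so the slice helpers work as-is.
	total := 0
	objx.New(map[string]interface{}{"retries": []int{1, 2, 3}}).
		Get("retries").
		EachInt(func(i, val int) bool {
			total += val
			return true // keep iterating
		})
	fmt.Println(total) // 6
}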
+func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. 
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. 
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. 
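Every Must* accessor above panics on a type mismatch, so callers either guard with the matching Is* predicate or use the non-panicking form with a default. A brief sketch with a hypothetical value, same import assumption:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.New(map[string]interface{}{"id": int64(42)}).Get("id")

	if v.IsInt64() {
		fmt.Println(v.MustInt64()) // 42
	}

	// v.MustUint() would panic here: the stored value is an int64, not a uint.
	fmt.Println(v.Uint(7)) // 7: the non-panicking accessor falls back to its default
}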
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. 
+func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. 
+func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. 
+func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
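+//
+// A short illustrative sketch (the key name "temps" is made up; fmt is
+// only used inside the collector):
+//
+//	v := New(map[string]interface{}{"temps": []float32{1.5, 2.5}}).Get("temps")
+//	labels := v.CollectFloat32(func(i int, val float32) interface{} {
+//		return fmt.Sprintf("#%d=%v", i, val)
+//	})
+//	// labels.MustInterSlice() holds the two formatted strings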
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. 
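+//
+// A short illustrative sketch (the key name "scores" is made up):
+//
+//	v := New(map[string]interface{}{"scores": []float64{1, 2, 3}}).Get("scores")
+//	doubled := v.ReplaceFloat64(func(i int, val float64) float64 {
+//		return val * 2
+//	})
+//	// doubled.MustFloat64Slice() yields []float64{2, 4, 6}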
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
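+//
+// A short illustrative sketch (the key name "samples" is made up):
+//
+//	v := New(map[string]interface{}{"samples": []complex64{1, 2, 3, 4}}).Get("samples")
+//	grouped := v.GroupComplex64(func(i int, val complex64) string {
+//		return fmt.Sprintf("%v", i%2 == 0)
+//	})
+//	// grouped wraps a map[string][]complex64 keyed by "true" and "false"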
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. 
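+//
+// Note that, as the loop below is written, an item is appended to the
+// result when the decider returns false. A short illustrative sketch
+// (the key name "points" is made up):
+//
+//	v := New(map[string]interface{}{"points": []complex128{1, 2, 3, 4}}).Get("points")
+//	kept := v.WhereComplex128(func(i int, val complex128) bool {
+//		return i%2 == 0
+//	})
+//	// kept wraps the items at the odd indexes, []complex128{2, 4}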
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go new file mode 100644 index 00000000..f7a4fcee --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go @@ -0,0 +1,2867 @@ +package objx + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInter(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Inter()) + assert.Equal(t, val, New(m).Get("value").MustInter()) + assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) + assert.Equal(t, val, New(m).Get("nothing").Inter("something")) + + assert.Panics(t, func() { + New(m).Get("age").MustInter() + }) + +} + +func TestInterSlice(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) + assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) + assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInterSlice() + }) + +} + +func TestIsInter(t *testing.T) { + + var v *Value + + v = &Value{data: 
interface{}("something")} + assert.True(t, v.IsInter()) + + v = &Value{data: []interface{}{interface{}("something")}} + assert.True(t, v.IsInterSlice()) + +} + +func TestEachInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + count := 0 + replacedVals := make([]interface{}, 0) + assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) + +} + +func TestWhereInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + selected := v.WhereInter(func(i int, val interface{}) bool { + return i%2 == 0 + }).MustInterSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + grouped := v.GroupInter(func(i int, val interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + rawArr := v.MustInterSlice() + + replaced := v.ReplaceInter(func(index int, val interface{}) interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInterSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + collected := v.CollectInter(func(index int, val interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestMSI(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSI()) + assert.Equal(t, val, 
New(m).Get("value").MustMSI()) + assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) + assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) + + assert.Panics(t, func() { + New(m).Get("age").MustMSI() + }) + +} + +func TestMSISlice(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) + assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) + assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustMSISlice() + }) + +} + +func TestIsMSI(t *testing.T) { + + var v *Value + + v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} + assert.True(t, v.IsMSI()) + + v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + assert.True(t, v.IsMSISlice()) + +} + +func TestEachMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + count := 0 + replacedVals := make([]map[string]interface{}, 0) + assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) + assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) + assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) + +} + +func TestWhereMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + }).MustMSISlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceMSI(t *testing.T) { + + v := &Value{data: 
[]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + rawArr := v.MustMSISlice() + + replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustMSISlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestObjxMap(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMap()) + assert.Equal(t, val, New(m).Get("value").MustObjxMap()) + assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) + + assert.Panics(t, func() { + New(m).Get("age").MustObjxMap() + }) + +} + +func TestObjxMapSlice(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) + assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustObjxMapSlice() + }) + +} + +func TestIsObjxMap(t *testing.T) { + + var v *Value + + v = &Value{data: (Map)(New(1))} + assert.True(t, v.IsObjxMap()) + + v = &Value{data: [](Map){(Map)(New(1))}} + assert.True(t, v.IsObjxMapSlice()) + +} + +func TestEachObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + count := 0 + replacedVals := make([](Map), 0) + assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return 
false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) + +} + +func TestWhereObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + selected := v.WhereObjxMap(func(i int, val Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + grouped := v.GroupObjxMap(func(i int, val Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][](Map)) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + rawArr := v.MustObjxMapSlice() + + replaced := v.ReplaceObjxMap(func(index int, val Map) Map { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustObjxMapSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + collected := v.CollectObjxMap(func(index int, val Map) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestBool(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Bool()) + assert.Equal(t, val, New(m).Get("value").MustBool()) + assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) + assert.Equal(t, val, New(m).Get("nothing").Bool(true)) + + assert.Panics(t, func() { + New(m).Get("age").MustBool() + }) + +} + +func TestBoolSlice(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": []bool{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) + assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) + assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustBoolSlice() + }) + +} + +func TestIsBool(t *testing.T) { + + var v *Value + + v = &Value{data: bool(true)} + assert.True(t, v.IsBool()) + + v = &Value{data: []bool{bool(true)}} + assert.True(t, v.IsBoolSlice()) + +} + +func TestEachBool(t *testing.T) { + + v := &Value{data: 
[]bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} + count := 0 + replacedVals := make([]bool, 0) + assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) + +} + +func TestWhereBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + selected := v.WhereBool(func(i int, val bool) bool { + return i%2 == 0 + }).MustBoolSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + grouped := v.GroupBool(func(i int, val bool) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]bool) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + rawArr := v.MustBoolSlice() + + replaced := v.ReplaceBool(func(index int, val bool) bool { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustBoolSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + collected := v.CollectBool(func(index int, val bool) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestStr(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Str()) + assert.Equal(t, val, New(m).Get("value").MustStr()) + assert.Equal(t, string(""), New(m).Get("nothing").Str()) + assert.Equal(t, val, New(m).Get("nothing").Str("hello")) + + assert.Panics(t, func() { + New(m).Get("age").MustStr() + }) + +} + +func TestStrSlice(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": []string{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) + assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) + assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustStrSlice() + }) + +} + +func TestIsStr(t *testing.T) { + + var v *Value + + v = &Value{data: 
string("hello")} + assert.True(t, v.IsStr()) + + v = &Value{data: []string{string("hello")}} + assert.True(t, v.IsStrSlice()) + +} + +func TestEachStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + count := 0 + replacedVals := make([]string, 0) + assert.Equal(t, v, v.EachStr(func(i int, val string) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) + +} + +func TestWhereStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + selected := v.WhereStr(func(i int, val string) bool { + return i%2 == 0 + }).MustStrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + grouped := v.GroupStr(func(i int, val string) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]string) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + rawArr := v.MustStrSlice() + + replaced := v.ReplaceStr(func(index int, val string) string { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustStrSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + collected := v.CollectStr(func(index int, val string) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int()) + assert.Equal(t, val, New(m).Get("value").MustInt()) + assert.Equal(t, int(0), New(m).Get("nothing").Int()) + assert.Equal(t, val, New(m).Get("nothing").Int(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt() + }) + +} + +func TestIntSlice(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": []int{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) 
+ assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) + assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustIntSlice() + }) + +} + +func TestIsInt(t *testing.T) { + + var v *Value + + v = &Value{data: int(1)} + assert.True(t, v.IsInt()) + + v = &Value{data: []int{int(1)}} + assert.True(t, v.IsIntSlice()) + +} + +func TestEachInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} + count := 0 + replacedVals := make([]int, 0) + assert.Equal(t, v, v.EachInt(func(i int, val int) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) + +} + +func TestWhereInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + selected := v.WhereInt(func(i int, val int) bool { + return i%2 == 0 + }).MustIntSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + grouped := v.GroupInt(func(i int, val int) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + rawArr := v.MustIntSlice() + + replaced := v.ReplaceInt(func(index int, val int) int { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustIntSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + collected := v.CollectInt(func(index int, val int) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt8(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8()) + assert.Equal(t, val, New(m).Get("value").MustInt8()) + assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) + assert.Equal(t, val, New(m).Get("nothing").Int8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt8() + }) + +} + +func TestInt8Slice(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": []int8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) + assert.Equal(t, 
[]int8(nil), New(m).Get("nothing").Int8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt8Slice() + }) + +} + +func TestIsInt8(t *testing.T) { + + var v *Value + + v = &Value{data: int8(1)} + assert.True(t, v.IsInt8()) + + v = &Value{data: []int8{int8(1)}} + assert.True(t, v.IsInt8Slice()) + +} + +func TestEachInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} + count := 0 + replacedVals := make([]int8, 0) + assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) + +} + +func TestWhereInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + selected := v.WhereInt8(func(i int, val int8) bool { + return i%2 == 0 + }).MustInt8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + grouped := v.GroupInt8(func(i int, val int8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + rawArr := v.MustInt8Slice() + + replaced := v.ReplaceInt8(func(index int, val int8) int8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + collected := v.CollectInt8(func(index int, val int8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt16(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16()) + assert.Equal(t, val, New(m).Get("value").MustInt16()) + assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) + assert.Equal(t, val, New(m).Get("nothing").Int16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt16() + }) + +} + +func TestInt16Slice(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": []int16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) + assert.Equal(t, 
val, New(m).Get("value").MustInt16Slice()[0]) + assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt16Slice() + }) + +} + +func TestIsInt16(t *testing.T) { + + var v *Value + + v = &Value{data: int16(1)} + assert.True(t, v.IsInt16()) + + v = &Value{data: []int16{int16(1)}} + assert.True(t, v.IsInt16Slice()) + +} + +func TestEachInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} + count := 0 + replacedVals := make([]int16, 0) + assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) + +} + +func TestWhereInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + selected := v.WhereInt16(func(i int, val int16) bool { + return i%2 == 0 + }).MustInt16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + grouped := v.GroupInt16(func(i int, val int16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + rawArr := v.MustInt16Slice() + + replaced := v.ReplaceInt16(func(index int, val int16) int16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + collected := v.CollectInt16(func(index int, val int16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt32(t *testing.T) { + + val := int32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32()) + assert.Equal(t, val, New(m).Get("value").MustInt32()) + assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) + assert.Equal(t, val, New(m).Get("nothing").Int32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt32() + }) + +} + +func TestInt32Slice(t *testing.T) { + + val := int32(1) + m := 
map[string]interface{}{"value": []int32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) + assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt32Slice() + }) + +} + +func TestIsInt32(t *testing.T) { + + var v *Value + + v = &Value{data: int32(1)} + assert.True(t, v.IsInt32()) + + v = &Value{data: []int32{int32(1)}} + assert.True(t, v.IsInt32Slice()) + +} + +func TestEachInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} + count := 0 + replacedVals := make([]int32, 0) + assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) + +} + +func TestWhereInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + selected := v.WhereInt32(func(i int, val int32) bool { + return i%2 == 0 + }).MustInt32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + grouped := v.GroupInt32(func(i int, val int32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + rawArr := v.MustInt32Slice() + + replaced := v.ReplaceInt32(func(index int, val int32) int32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + collected := v.CollectInt32(func(index int, val int32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt64(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64()) + assert.Equal(t, val, New(m).Get("value").MustInt64()) + assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) + assert.Equal(t, val, New(m).Get("nothing").Int64(1)) + + 
assert.Panics(t, func() { + New(m).Get("age").MustInt64() + }) + +} + +func TestInt64Slice(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": []int64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) + assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt64Slice() + }) + +} + +func TestIsInt64(t *testing.T) { + + var v *Value + + v = &Value{data: int64(1)} + assert.True(t, v.IsInt64()) + + v = &Value{data: []int64{int64(1)}} + assert.True(t, v.IsInt64Slice()) + +} + +func TestEachInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} + count := 0 + replacedVals := make([]int64, 0) + assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) + +} + +func TestWhereInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + selected := v.WhereInt64(func(i int, val int64) bool { + return i%2 == 0 + }).MustInt64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + grouped := v.GroupInt64(func(i int, val int64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + rawArr := v.MustInt64Slice() + + replaced := v.ReplaceInt64(func(index int, val int64) int64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + collected := v.CollectInt64(func(index int, val int64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint()) + assert.Equal(t, val, 
New(m).Get("value").MustUint()) + assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) + assert.Equal(t, val, New(m).Get("nothing").Uint(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint() + }) + +} + +func TestUintSlice(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": []uint{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) + assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintSlice() + }) + +} + +func TestIsUint(t *testing.T) { + + var v *Value + + v = &Value{data: uint(1)} + assert.True(t, v.IsUint()) + + v = &Value{data: []uint{uint(1)}} + assert.True(t, v.IsUintSlice()) + +} + +func TestEachUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} + count := 0 + replacedVals := make([]uint, 0) + assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) + +} + +func TestWhereUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + selected := v.WhereUint(func(i int, val uint) bool { + return i%2 == 0 + }).MustUintSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + grouped := v.GroupUint(func(i int, val uint) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + rawArr := v.MustUintSlice() + + replaced := v.ReplaceUint(func(index int, val uint) uint { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + collected := v.CollectUint(func(index int, val uint) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint8(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, 
val, New(m).Get("value").Uint8()) + assert.Equal(t, val, New(m).Get("value").MustUint8()) + assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) + assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint8() + }) + +} + +func TestUint8Slice(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) + assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint8Slice() + }) + +} + +func TestIsUint8(t *testing.T) { + + var v *Value + + v = &Value{data: uint8(1)} + assert.True(t, v.IsUint8()) + + v = &Value{data: []uint8{uint8(1)}} + assert.True(t, v.IsUint8Slice()) + +} + +func TestEachUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + count := 0 + replacedVals := make([]uint8, 0) + assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) + +} + +func TestWhereUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + selected := v.WhereUint8(func(i int, val uint8) bool { + return i%2 == 0 + }).MustUint8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + grouped := v.GroupUint8(func(i int, val uint8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + rawArr := v.MustUint8Slice() + + replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + collected := v.CollectUint8(func(index int, val uint8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// 
************************************************************ + +func TestUint16(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16()) + assert.Equal(t, val, New(m).Get("value").MustUint16()) + assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) + assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint16() + }) + +} + +func TestUint16Slice(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) + assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint16Slice() + }) + +} + +func TestIsUint16(t *testing.T) { + + var v *Value + + v = &Value{data: uint16(1)} + assert.True(t, v.IsUint16()) + + v = &Value{data: []uint16{uint16(1)}} + assert.True(t, v.IsUint16Slice()) + +} + +func TestEachUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + count := 0 + replacedVals := make([]uint16, 0) + assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) + +} + +func TestWhereUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + selected := v.WhereUint16(func(i int, val uint16) bool { + return i%2 == 0 + }).MustUint16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + grouped := v.GroupUint16(func(i int, val uint16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + rawArr := v.MustUint16Slice() + + replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + collected := v.CollectUint16(func(index int, val uint16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, 
collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint32(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32()) + assert.Equal(t, val, New(m).Get("value").MustUint32()) + assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) + assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint32() + }) + +} + +func TestUint32Slice(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) + assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint32Slice() + }) + +} + +func TestIsUint32(t *testing.T) { + + var v *Value + + v = &Value{data: uint32(1)} + assert.True(t, v.IsUint32()) + + v = &Value{data: []uint32{uint32(1)}} + assert.True(t, v.IsUint32Slice()) + +} + +func TestEachUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + count := 0 + replacedVals := make([]uint32, 0) + assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) + +} + +func TestWhereUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + selected := v.WhereUint32(func(i int, val uint32) bool { + return i%2 == 0 + }).MustUint32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + grouped := v.GroupUint32(func(i int, val uint32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + rawArr := v.MustUint32Slice() + + replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + collected := v.CollectUint32(func(index int, val uint32) 
interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint64(t *testing.T) { + + val := uint64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64()) + assert.Equal(t, val, New(m).Get("value").MustUint64()) + assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) + assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint64() + }) + +} + +func TestUint64Slice(t *testing.T) { + + val := uint64(1) + m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) + assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint64Slice() + }) + +} + +func TestIsUint64(t *testing.T) { + + var v *Value + + v = &Value{data: uint64(1)} + assert.True(t, v.IsUint64()) + + v = &Value{data: []uint64{uint64(1)}} + assert.True(t, v.IsUint64Slice()) + +} + +func TestEachUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + count := 0 + replacedVals := make([]uint64, 0) + assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) + +} + +func TestWhereUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + selected := v.WhereUint64(func(i int, val uint64) bool { + return i%2 == 0 + }).MustUint64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + grouped := v.GroupUint64(func(i int, val uint64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + rawArr := v.MustUint64Slice() + + replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], 
rawArr[0]) + } + +} + +func TestCollectUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + collected := v.CollectUint64(func(index int, val uint64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUintptr(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uintptr()) + assert.Equal(t, val, New(m).Get("value").MustUintptr()) + assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr()) + assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUintptr() + }) + +} + +func TestUintptrSlice(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) + assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintptrSlice() + }) + +} + +func TestIsUintptr(t *testing.T) { + + var v *Value + + v = &Value{data: uintptr(1)} + assert.True(t, v.IsUintptr()) + + v = &Value{data: []uintptr{uintptr(1)}} + assert.True(t, v.IsUintptrSlice()) + +} + +func TestEachUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + count := 0 + replacedVals := make([]uintptr, 0) + assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) + +} + +func TestWhereUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + selected := v.WhereUintptr(func(i int, val uintptr) bool { + return i%2 == 0 + }).MustUintptrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + grouped := v.GroupUintptr(func(i int, val uintptr) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uintptr) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + rawArr := v.MustUintptrSlice() + + replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintptrSlice() + if 
assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + collected := v.CollectUintptr(func(index int, val uintptr) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat32(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32()) + assert.Equal(t, val, New(m).Get("value").MustFloat32()) + assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) + assert.Equal(t, val, New(m).Get("nothing").Float32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustFloat32() + }) + +} + +func TestFloat32Slice(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": []float32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) + assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat32Slice() + }) + +} + +func TestIsFloat32(t *testing.T) { + + var v *Value + + v = &Value{data: float32(1)} + assert.True(t, v.IsFloat32()) + + v = &Value{data: []float32{float32(1)}} + assert.True(t, v.IsFloat32Slice()) + +} + +func TestEachFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} + count := 0 + replacedVals := make([]float32, 0) + assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) + +} + +func TestWhereFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + selected := v.WhereFloat32(func(i int, val float32) bool { + return i%2 == 0 + }).MustFloat32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + grouped := v.GroupFloat32(func(i int, val float32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), 
float32(1), float32(1), float32(1), float32(1), float32(1)}} + + rawArr := v.MustFloat32Slice() + + replaced := v.ReplaceFloat32(func(index int, val float32) float32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + collected := v.CollectFloat32(func(index int, val float32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat64(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64()) + assert.Equal(t, val, New(m).Get("value").MustFloat64()) + assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) + assert.Equal(t, val, New(m).Get("nothing").Float64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustFloat64() + }) + +} + +func TestFloat64Slice(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": []float64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) + assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat64Slice() + }) + +} + +func TestIsFloat64(t *testing.T) { + + var v *Value + + v = &Value{data: float64(1)} + assert.True(t, v.IsFloat64()) + + v = &Value{data: []float64{float64(1)}} + assert.True(t, v.IsFloat64Slice()) + +} + +func TestEachFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} + count := 0 + replacedVals := make([]float64, 0) + assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) + +} + +func TestWhereFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + selected := v.WhereFloat64(func(i int, val float64) bool { + return i%2 == 0 + }).MustFloat64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + grouped := v.GroupFloat64(func(i int, val 
float64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + rawArr := v.MustFloat64Slice() + + replaced := v.ReplaceFloat64(func(index int, val float64) float64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + collected := v.CollectFloat64(func(index int, val float64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex64(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64()) + assert.Equal(t, val, New(m).Get("value").MustComplex64()) + assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) + assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex64() + }) + +} + +func TestComplex64Slice(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0]) + assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex64Slice() + }) + +} + +func TestIsComplex64(t *testing.T) { + + var v *Value + + v = &Value{data: complex64(1)} + assert.True(t, v.IsComplex64()) + + v = &Value{data: []complex64{complex64(1)}} + assert.True(t, v.IsComplex64Slice()) + +} + +func TestEachComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + count := 0 + replacedVals := make([]complex64, 0) + assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) + +} + +func TestWhereComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), 
complex64(1), complex64(1), complex64(1), complex64(1)}} + + selected := v.WhereComplex64(func(i int, val complex64) bool { + return i%2 == 0 + }).MustComplex64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + grouped := v.GroupComplex64(func(i int, val complex64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + rawArr := v.MustComplex64Slice() + + replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + collected := v.CollectComplex64(func(index int, val complex64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex128(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex128()) + assert.Equal(t, val, New(m).Get("value").MustComplex128()) + assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) + assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex128() + }) + +} + +func TestComplex128Slice(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) + assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex128Slice() + }) + +} + +func TestIsComplex128(t *testing.T) { + + var v *Value + + v = &Value{data: complex128(1)} + assert.True(t, v.IsComplex128()) + + v = &Value{data: []complex128{complex128(1)}} + assert.True(t, v.IsComplex128Slice()) + +} + +func TestEachComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + count := 0 + replacedVals := make([]complex128, 0) + assert.Equal(t, v, 
v.EachComplex128(func(i int, val complex128) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) + +} + +func TestWhereComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + selected := v.WhereComplex128(func(i int, val complex128) bool { + return i%2 == 0 + }).MustComplex128Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + grouped := v.GroupComplex128(func(i int, val complex128) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex128) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + rawArr := v.MustComplex128Slice() + + replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex128Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + collected := v.CollectComplex128(func(index int, val complex128) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value.go b/Godeps/_workspace/src/github.com/stretchr/objx/value.go new file mode 100644 index 00000000..7aaef06b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/value.go @@ -0,0 +1,13 @@ +package objx + +// Value provides methods for extracting interface{} data in various +// types. 
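
// For context, a brief usage sketch of the objx API that the vendored tests above
// exercise: objx.New wraps a map[string]interface{}, Get returns a *Value, the typed
// accessors (Int64, Uint, Float64, ...) accept an optional default, the Must* variants
// panic on a type mismatch, and Where/Group/Replace/Collect chain over typed slices.
// This assumes the upstream import path github.com/stretchr/objx; the keys and values
// used below are illustrative only.
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"count": int64(3),
		"nums":  []int64{1, 2, 3, 4},
	})

	fmt.Println(m.Get("count").Int64())    // 3
	fmt.Println(m.Get("missing").Int64(7)) // 7: the supplied default for an absent key
	// m.Get("missing").MustInt64() would panic, which is what TestInt64 asserts.

	// Select the even entries of a typed slice, mirroring TestWhereInt64.
	evens := m.Get("nums").WhereInt64(func(i int, v int64) bool {
		return v%2 == 0
	}).MustInt64Slice()
	fmt.Println(evens) // [2 4]
}
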
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go new file mode 100644 index 00000000..0bc65d92 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go @@ -0,0 +1 @@ +package objx diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 00000000..95dc6759 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,657 @@ +package assert + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "time" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + if reflect.DeepEqual(expected, actual) { + return true + } + + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if expectedValue == actualValue { + return true + } + + // Attempt comparison after type conversion + if actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) { + return true + } + + // Last ditch effort + if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) { + return true + } + + return false + +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occured in calling code.*/ + +// CallerInfo returns a string containing the file and line number of the assert call +// that failed. +func CallerInfo() string { + + file := "" + line := 0 + ok := false + + for i := 0; ; i++ { + _, file, line, ok = runtime.Caller(i) + if !ok { + return "" + } + parts := strings.Split(file, "/") + dir := parts[len(parts)-2] + file = parts[len(parts)-1] + if (dir != "assert" && dir != "mock") || file == "mock_test.go" { + break + } + } + + return fmt.Sprintf("%s:%d", file, line) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. +func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) 
+ + if len(message) > 0 { + t.Errorf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r\tMessages:\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage, message) + } else { + t.Errorf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage) + } + + return false +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...) + } + + return true + +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Not equal: %#v != %#v", expected, actual), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + success := true + + if object == nil { + success = false + } else { + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + success = false + } + } + + if !success { + Fail(t, "Expected not to be nil.", msgAndArgs...) + } + + return success +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) 
+} + +var zeros = []interface{}{ + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + float32(0), + float64(0), +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + if object == nil { + return true + } else if object == "" { + return true + } else if object == false { + return true + } + + for _, v := range zeros { + if object == v { + return true + } + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + case reflect.Map: + fallthrough + case reflect.Slice, reflect.Chan: + { + return (objValue.Len() == 0) + } + case reflect.Ptr: + { + switch object.(type) { + case *time.Time: + return object.(*time.Time).IsZero() + default: + return false + } + } + } + return false +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but have %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is true. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. 
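
// For context, a short illustrative test showing how the emptiness and length
// assertions defined above are typically called; it assumes the upstream import
// path github.com/stretchr/testify/assert and a plain `go test` harness.
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestQueueDrains(t *testing.T) {
	queue := []string{"a", "b", "c"}

	assert.NotEmpty(t, queue, "queue starts with pending items")
	assert.Len(t, queue, 3, "three items were enqueued")

	queue = queue[:0]
	assert.Empty(t, queue, "a drained queue is empty")
	assert.True(t, len(queue) == 0)
}
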
+// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if ObjectsAreEqual(expected, actual) { + return Fail(t, "Should not be equal", msgAndArgs...) + } + + return true + +} + +// Contains asserts that the specified string contains the specified substring. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s, contains string, msgAndArgs ...interface{}) bool { + + if !strings.Contains(s, contains) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string does NOT contain the specified substring. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains string, msgAndArgs ...interface{}) bool { + + if strings.Contains(s, contains) { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). 
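
// For context, an illustrative test for the panic assertions defined above,
// assuming the upstream import path github.com/stretchr/testify/assert; the
// mustParse helper below is hypothetical and exists only for this example.
package demo

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

// mustParse converts s to an int and panics on invalid input.
func mustParse(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return n
}

func TestMustParse(t *testing.T) {
	assert.Panics(t, func() { mustParse("not a number") }, "invalid input should panic")
	assert.NotPanics(t, func() { mustParse("42") }, "valid input should not panic")
	assert.Condition(t, func() bool { return mustParse("42") == 42 }, "round trip preserves the value")
}
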
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// min(|expected|, |actual|) * epsilon +func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + // invalid input + return 0 + } + + if af < 0 { + af = -af + } + if bf < 0 { + bf = -bf + } + var delta float64 + if af < bf { + delta = af * epsilon + } else { + delta = bf * epsilon + } + return delta +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + delta := calcEpsilonDelta(expected, actual, epsilon) + + return InDelta(t, expected, actual, delta, msgAndArgs...) +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if isNil(err) { + return true + } + + return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + return NotNil(t, err, "An error is expected but got nil. %s", message) + +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
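
// For context, a minimal sketch of the numeric and error assertions defined above
// (InDelta bounds the absolute difference, InEpsilon bounds the relative error),
// again assuming the upstream import path github.com/stretchr/testify/assert.
package demo

import (
	"math"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNumericAssertions(t *testing.T) {
	assert.InDelta(t, math.Pi, 22.0/7.0, 0.01, "22/7 approximates pi to within 0.01")
	assert.InEpsilon(t, 1000.0, 1010.0, 0.02, "1010 is within 2%% of 1000")

	f, err := strconv.ParseFloat("3.14", 64)
	if assert.NoError(t, err) {
		assert.InDelta(t, 3.14, f, 1e-9)
	}
}
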
+// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + return false + } + s := "An error with value \"%s\" is expected but got \"%s\". %s" + return Equal(t, theError.Error(), errString, + s, errString, theError.Error(), message) +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 00000000..4f91c215 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,538 @@ +package assert + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +func TestObjectsAreEqual(t *testing.T) { + + if !ObjectsAreEqual("Hello World", "Hello World") { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123, 123) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123.5, 123.5) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(nil, nil) { + t.Error("objectsAreEqual should return true") + } + +} + +func TestImplements(t *testing.T) { + + mockT := new(testing.T) + + if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } + +} + +func TestIsType(t *testing.T) { + + mockT := new(testing.T) + + if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqual(t *testing.T) { + + mockT := new(testing.T) + + if !Equal(mockT, "Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !Equal(mockT, 123, 123) { + t.Error("Equal should return true") + } + if !Equal(mockT, 123.5, 123.5) { + t.Error("Equal should return true") + } + if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("Equal 
should return true") + } + if !Equal(mockT, nil, nil) { + t.Error("Equal should return true") + } + if !Equal(mockT, int32(123), int64(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, int64(123), uint64(123)) { + t.Error("Equal should return true") + } + +} + +func TestNotNil(t *testing.T) { + + mockT := new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContains(t *testing.T) { + + mockT := new(testing.T) + + if !Contains(mockT, "Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if Contains(mockT, "Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanics(t 
*testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestEqual_Funcs(t *testing.T) { + + type f func() int + f1 := func() int { return 1 } + f2 := func() int { return 2 } + + f1Copy := f1 + + Equal(t, f1Copy, f1, "Funcs are the same and should be considered equal") + NotEqual(t, f1, f2, "f1 and f2 are different") + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + +} + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero 
int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), 
"|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 00000000..4c72f42c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,150 @@ +// A set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. 
+// +// Here is an overview of the assert functions: +// +// assert.Equal(t, expected, actual [, message [, format-args]) +// +// assert.NotEqual(t, notExpected, actual [, message [, format-args]]) +// +// assert.True(t, actualBool [, message [, format-args]]) +// +// assert.False(t, actualBool [, message [, format-args]]) +// +// assert.Nil(t, actualObject [, message [, format-args]]) +// +// assert.NotNil(t, actualObject [, message [, format-args]]) +// +// assert.Empty(t, actualObject [, message [, format-args]]) +// +// assert.NotEmpty(t, actualObject [, message [, format-args]]) +// +// assert.Len(t, actualObject, expectedLength, [, message [, format-args]]) +// +// assert.Error(t, errorObject [, message [, format-args]]) +// +// assert.NoError(t, errorObject [, message [, format-args]]) +// +// assert.EqualError(t, theError, errString [, message [, format-args]]) +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]]) +// +// assert.IsType(t, expectedObject, actualObject [, message [, format-args]]) +// +// assert.Contains(t, string, substring [, message [, format-args]]) +// +// assert.NotContains(t, string, substring [, message [, format-args]]) +// +// assert.Panics(t, func(){ +// +// // call code that should panic +// +// } [, message [, format-args]]) +// +// assert.NotPanics(t, func(){ +// +// // call code that should not panic +// +// } [, message [, format-args]]) +// +// assert.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]]) +// +// assert.InDelta(t, numA, numB, delta, [, message [, format-args]]) +// +// assert.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]]) +// +// assert package contains Assertions object. it has assertion methods. +// +// Here is an overview of the assert functions: +// assert.Equal(expected, actual [, message [, format-args]) +// +// assert.NotEqual(notExpected, actual [, message [, format-args]]) +// +// assert.True(actualBool [, message [, format-args]]) +// +// assert.False(actualBool [, message [, format-args]]) +// +// assert.Nil(actualObject [, message [, format-args]]) +// +// assert.NotNil(actualObject [, message [, format-args]]) +// +// assert.Empty(actualObject [, message [, format-args]]) +// +// assert.NotEmpty(actualObject [, message [, format-args]]) +// +// assert.Len(actualObject, expectedLength, [, message [, format-args]]) +// +// assert.Error(errorObject [, message [, format-args]]) +// +// assert.NoError(errorObject [, message [, format-args]]) +// +// assert.EqualError(theError, errString [, message [, format-args]]) +// +// assert.Implements((*MyInterface)(nil), new(MyObject) [,message [, format-args]]) +// +// assert.IsType(expectedObject, actualObject [, message [, format-args]]) +// +// assert.Contains(string, substring [, message [, format-args]]) +// +// assert.NotContains(string, substring [, message [, format-args]]) +// +// assert.Panics(func(){ +// +// // call code that should panic +// +// } [, message [, format-args]]) +// +// assert.NotPanics(func(){ +// +// // call code that should not panic +// +// } [, message [, format-args]]) +// +// assert.WithinDuration(timeA, timeB, deltaTime, [, message [, format-args]]) +// +// assert.InDelta(numA, numB, delta, [, message [, format-args]]) +// +// assert.InEpsilon(numA, numB, epsilon, [, message [, format-args]]) +package assert diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 
00000000..ac9dc9d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 00000000..7c2eea76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,232 @@ +package assert + +import "time" + +type Assertions struct { + t TestingT +} + +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a +// slice with len == 0. +// +// assert.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + +// Empty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a +// slice with len == 0. 
+// +// if assert.NotEmpty(obj) { +// assert.Equal("two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + +// True asserts that the specified value is true. +// +// assert.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + +// False asserts that the specified value is true. +// +// assert.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// Contains asserts that the specified string contains the specified substring. +// +// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s, contains string, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + +// NotContains asserts that the specified string does NOT contain the specified substring. +// +// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s, contains string, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// Uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. 
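Taken together, the forwarding methods in this file let a test build the Assertions wrapper once with New(t) and drop the repeated *testing.T argument; a small self-contained sketch of that pattern (test body illustrative):

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestGreetingWrapper(t *testing.T) {
        a := assert.New(t)

        greeting := "Hello World"
        a.Contains(greeting, "World")
        a.Len(greeting, 11)
        a.WithinDuration(time.Now(), time.Now(), time.Second)
    }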
+// +// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(err) { +// assert.Equal(actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool { + return NoError(a.t, theError, msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(err, "An error was expected") { +// assert.Equal(err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool { + return Error(a.t, theError, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(err, "An error was expected") { +// assert.Equal(err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) 
+} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go new file mode 100644 index 00000000..7982b07a --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go @@ -0,0 +1,380 @@ +package assert + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } +} + +func TestIsTypeWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqualWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Equal("Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !assert.Equal(123, 123) { + t.Error("Equal should return true") + } + if !assert.Equal(123.5, 123.5) { + t.Error("Equal should return true") + } + if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !assert.Equal(nil, nil) { + t.Error("Equal should return true") + } +} + +func TestNotNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.NotNil(new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if assert.NotNil(nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Nil(nil) { + t.Error("Nil should return true: object is nil") + } + if assert.Nil(new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t 
*testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestEqualWrapper_Funcs(t *testing.T) { + + assert := New(t) + + type f func() int + var f1 f = func() int { return 1 } + var f2 f = func() int { return 2 } + + var f1_copy f = f1 + + assert.Equal(f1_copy, f1, "Funcs are the same and should be considered equal") + assert.NotEqual(f1, f2, "f1 and f2 are different") + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error = nil + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error = nil + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return 
false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") + assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDurationWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + a := time.Now() + b := a.Add(10 * time.Second) + + assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + 
assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 00000000..7d4e7b8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,43 @@ +// Provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Mock.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 00000000..eb003672 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,505 @@ +package mock + +import ( + "fmt" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "reflect" + "runtime" + "strings" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top of this document. 
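The package comment above (mock/doc.go) summarises the intended pattern: embed Mock in a test implementation, forward each method through Mock.Called, and drive behaviour with On/Return. A compact, self-contained sketch of that flow (Greeter and MockGreeter are illustrative names, not part of this patch):

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/mock"
    )

    type Greeter interface {
        Greet(name string) (string, error)
    }

    // MockGreeter embeds mock.Mock so every call is recorded and matched
    // against the expectations registered with On/Return.
    type MockGreeter struct {
        mock.Mock
    }

    func (m *MockGreeter) Greet(name string) (string, error) {
        args := m.Mock.Called(name)
        return args.String(0), args.Error(1)
    }

    func TestGreeter(t *testing.T) {
        g := new(MockGreeter)
        g.On("Greet", "alice").Return("hello alice", nil).Once()

        msg, err := g.Greet("alice")
        assert.NoError(t, err)
        assert.Equal(t, "hello alice", msg)

        // Fails the test if any registered expectation was not met.
        g.AssertExpectations(t)
    }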
+type Mock struct { + + // The method name that is currently + // being referred to by the On method. + onMethodName string + + // An array of the arguments that are + // currently being referred to by the On method. + onMethodArguments Arguments + + // Represents the calls that are expected of + // an object. + ExpectedCalls []Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Mock { + m.onMethodName = methodName + m.onMethodArguments = arguments + return m +} + +// Return finishes a description of an expectation of the method (and arguments) +// specified in the most recent On method call. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2) +func (m *Mock) Return(returnArguments ...interface{}) *Mock { + m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0}) + return m +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (m *Mock) Once() { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1 +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (m *Mock) Twice() { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2 +} + +// Times indicates that that the mock should only return the indicated number +// of times. 
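Once, Twice and Times bound how often an expectation answers; once a bounded expectation is consumed, Called falls through to the next matching one. A sketch of that behaviour, reusing the illustrative MockGreeter from the previous example:

    func TestGreeterRepeatability(t *testing.T) {
        g := new(MockGreeter)
        g.On("Greet", "bob").Return("hi bob", nil).Once()
        g.On("Greet", "bob").Return("hi again bob", nil)

        first, _ := g.Greet("bob")  // "hi bob": the Once expectation is consumed
        second, _ := g.Greet("bob") // "hi again bob": the unbounded expectation answers
        assert.Equal(t, "hi bob", first)
        assert.Equal(t, "hi again bob", second)
    }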
+// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (m *Mock) Times(i int) { + m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i +} + +/* + Recording and responding to activity +*/ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, &call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { + + diffCount := 0 + var closestCall *Call = nil + + for _, call := range m.ExpectedCalls { + if call.Method == method { + + _, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = &call + } + + } + } + + if closestCall == nil { + return false, nil + } + + return true, closestCall +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string = "" + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceeded by +// appropriate .On .Return() calls) +func (m *Mock) Called(arguments ...interface{}) Arguments { + + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + + found, call := m.findExpectedCall(functionName, arguments...) + + switch { + case found < 0: + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(functionName, arguments...) + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) + } + case call.Repeatability == 1: + call.Repeatability = -1 + m.ExpectedCalls[found] = *call + case call.Repeatability > 1: + call.Repeatability -= 1 + m.ExpectedCalls[found] = *call + } + + // add the call + m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0}) + + return call.ReturnArguments + +} + +/* + Assertions +*/ + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. 
+// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + var success bool = true + for _, obj := range testObjects { + mockObj := obj.(Mock) + success = success && mockObj.AssertExpectations(t) + } + return success +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. +func (m *Mock) AssertExpectations(t TestingT) bool { + + var somethingMissing bool = false + var failedExpectations int = 0 + + // iterate through each expectation + for _, expectedCall := range m.ExpectedCalls { + switch { + case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments): + somethingMissing = true + failedExpectations++ + t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + case expectedCall.Repeatability > 0: + somethingMissing = true + failedExpectations++ + default: + t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + var actualCalls int = 0 + for _, call := range m.Calls { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%s", m.ExpectedCalls) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%s", m.ExpectedCalls) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.Calls { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // The "any" argument. Used in Diff and Assert when + // the argument being tested shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. 
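Anything and AnythingOfType relax argument matching when the exact value is unknown at the time the expectation is set; a sketch using the illustrative MockGreeter:

    func TestGreeterAnyArgument(t *testing.T) {
        g := new(MockGreeter)
        // Match any single string argument...
        g.On("Greet", mock.AnythingOfType("string")).Return("hello", nil)
        // ...or ignore the argument value and type entirely:
        // g.On("Greet", mock.Anything).Return("hello", nil)

        msg, _ := g.Greet("carol")
        assert.Equal(t, "hello", msg)
    }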
+type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + + var output string = "\n" + var differences int + + var maxArgCount int = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + + if len(objects) <= i { + actual = "(Missing)" + } else { + actual = objects[i] + } + + if len(args) <= i { + expected = "(Missing)" + } else { + expected = args[i] + } + + if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. 
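The typed getters that follow (String, Int, Error, Bool) simply index into the Arguments slice and type-assert, panicking on a mismatch; a small sketch of reading return values by position inside a mocked method or test (values illustrative, assuming the vendored import path github.com/stretchr/testify/mock):

    args := mock.Arguments{42, true, "done", nil}
    args.Int(0)    // 42
    args.Bool(1)   // true
    args.String(2) // "done"
    args.Error(3)  // nil: a nil element comes back as a nil error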
+func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index int = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 00000000..353d8104 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,669 @@ +package mock + +import ( + "errors" + "github.com/stretchr/testify/assert" + "testing" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. 
+type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Mock.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Mock.Called(yesorno) +} + +type ExampleType struct{} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Mock.Called(et) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + + } + +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod"), &mockedService.Mock) + assert.Equal(t, "TheExampleMethod", mockedService.Mock.onMethodName) + +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod", 1, 2, 3), &mockedService.Mock) + assert.Equal(t, "TheExampleMethod", mockedService.Mock.onMethodName) + assert.Equal(t, 1, mockedService.Mock.onMethodArguments[0]) + assert.Equal(t, 2, mockedService.Mock.onMethodArguments[1]) + assert.Equal(t, 3, mockedService.Mock.onMethodArguments[2]) + +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + + } + +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Once() + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + + } + +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + 
mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Twice() + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + + } + +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Times(5) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + + } + +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(), &mockedService.Mock) + + // ensure the call was created + if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) { + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 0, len(call.ReturnArguments)) + + } + +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := 
mockedService.Mock.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(5, "6", true).Once() + mockedService.Mock.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(-1, "hi", false) + + returnArguments1 := mockedService.Mock.Called(1, 2, 3) + returnArguments2 := mockedService.Mock.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Mock.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Mock.Calls[1].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Mock.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService3 *TestExampleImplementation = new(TestExampleImplementation) + + mockedService1.Mock.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.Mock.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.Mock.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, 
mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService2 *TestExampleImplementation = new(TestExampleImplementation) + var mockedService3 *TestExampleImplementation = new(TestExampleImplementation) + + mockedService1.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Mock.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Mock.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Mock.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.Mock.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Mock.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Mock.Called(4, 5, 6) + assert.Equal(t, 5, args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Mock.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Mock.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + 
+ var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Mock.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything).Return() + + mockedService.Mock.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Mock.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Mock.Called(1, 2, 3) + mockedService.Mock.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService *TestExampleImplementation = new(TestExampleImplementation) + + mockedService.Mock.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Mock.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +/* + Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args Arguments = []interface{}{"Hello World", 123, true} + var diff string + var count int + diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + var diff string + var count int + diff, count = args.Diff([]interface{}{"string", 456, 
"false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args Arguments = []interface{}{"string", Anything, true} + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args Arguments = []interface{}{"string", AnythingOfType("int"), true} + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var args Arguments = []interface{}{"string", AnythingOfType("string"), true} + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Assert(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err error = errors.New("An Error") + var args Arguments = []interface{}{"string", 123, true, err} + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true, nil} + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args Arguments = []interface{}{"string", 123, true} + assert.Equal(t, true, args.Bool(2)) + +} diff --git a/deploy/prepare.sh b/deploy/prepare.sh index 96e4a8d7..c2251064 100755 --- a/deploy/prepare.sh +++ b/deploy/prepare.sh @@ -4,4 +4,4 @@ set -e set -x # Statically build cAdvisor from source and stage it. -go build --ldflags '-extldflags "-static"' github.com/google/cadvisor +godep go build --ldflags '-extldflags "-static"' github.com/google/cadvisor