Merge pull request #1190 from timstclair/godep

Bump go-dockerclient godep version
Vish Kannan 2016-04-05 15:10:25 -07:00
commit 05a1d7d5ed
316 changed files with 101582 additions and 6600 deletions

Godeps/Godeps.json (generated)

@ -1,6 +1,7 @@
{
"ImportPath": "github.com/google/cadvisor",
"GoVersion": "go1.5.3",
"GodepVersion": "v60",
"Packages": [
"./..."
],
@ -154,68 +155,75 @@
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Comment": "0.2.1-764-g412c004",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
"Rev": "086d16d0c1428734a75065d2aa64e248093d2790"
},
{
"ImportPath": "github.com/garyburd/redigo/internal",

github.com/fsouza/go-dockerclient/.travis.yml

@ -3,13 +3,26 @@ sudo: required
go:
- 1.3.3
- 1.4.2
- 1.5.1
- 1.5.3
- 1.6
- tip
env:
- GOARCH=amd64
- GOARCH=386
- GOARCH=amd64 DOCKER_VERSION=1.7.1
- GOARCH=386 DOCKER_VERSION=1.7.1
- GOARCH=amd64 DOCKER_VERSION=1.8.3
- GOARCH=386 DOCKER_VERSION=1.8.3
- GOARCH=amd64 DOCKER_VERSION=1.9.1
- GOARCH=386 DOCKER_VERSION=1.9.1
- GOARCH=amd64 DOCKER_VERSION=1.10.3
- GOARCH=386 DOCKER_VERSION=1.10.3
install:
- travis_retry make prepare_docker
script:
- make test
- travis-scripts/run-tests.bash
- DOCKER_HOST=tcp://127.0.0.1:2375 make integration
services:
- docker
matrix:
fast_finish: true
allow_failures:
- go: tip
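The expanded matrix above exercises the client against several daemon versions, and the integration job reaches the daemon over TCP rather than the unix socket. A hypothetical smoke test in the same spirit, assuming a daemon is listening where DOCKER_HOST points:

```go
// Hypothetical smoke test mirroring the CI setup above: connect to whatever
// daemon DOCKER_HOST points at and ask for its version.
package main

import (
	"fmt"
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	endpoint := os.Getenv("DOCKER_HOST")
	if endpoint == "" {
		endpoint = "tcp://127.0.0.1:2375" // the endpoint the integration target uses
	}
	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal(err)
	}
	env, err := client.Version()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("daemon version:", env.Get("Version"))
}
```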

github.com/fsouza/go-dockerclient/AUTHORS

@ -6,12 +6,18 @@ Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
Aldrin Leal <aldrin@leal.eng.br>
Andreas Jaekle <andreas@jaekle.net>
Andrews Medina <andrewsmedina@gmail.com>
Artem Sidorenko <artem@2realities.com>
Andrey Sibiryov <kobolog@uber.com>
Andy Goldstein <andy.goldstein@redhat.com>
Antonio Murdaca <runcom@redhat.com>
Artem Sidorenko <artem@2realities.com>
Ben Marini <ben@remind101.com>
Ben McCann <benmccann.com>
Ben Parees <bparees@redhat.com>
Benno van den Berg <bennovandenberg@gmail.com>
Bradley Cicenas <bradley.cicenas@gmail.com>
Brendan Fosberry <brendan@codeship.com>
Brian Lalor <blalor@bravo5.org>
Brian P. Hamachek <brian@brianhama.com>
Brian Palmer <brianp@instructure.com>
Bryan Boreham <bjboreham@gmail.com>
Burke Libbey <burke@libbey.me>
@ -22,15 +28,18 @@ Cheah Chu Yeow <chuyeow@gmail.com>
cheneydeng <cheneydeng@qq.com>
Chris Bednarski <banzaimonkey@gmail.com>
CMGS <ilskdw@gmail.com>
Colin Hebert <hebert.colin@gmail.com>
Craig Jellick <craig@rancher.com>
Dan Williams <dcbw@redhat.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Darren Shepherd <darren@rancher.com>
Dave Choi <dave.choi@daumkakao.com>
David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com>
Dinesh Subhraveti <dinesh@gemini-systems.net>
Drew Wells <drew.wells00@gmail.com>
Ed <edrocksit@gmail.com>
Elias G. Schneevoigt <eliasgs@gmail.com>
Erez Horev <erez.horev@elastifile.com>
@ -40,11 +49,15 @@ Fabio Rehm <fgrehm@gmail.com>
Fatih Arslan <ftharsln@gmail.com>
Flavia Missi <flaviamissi@gmail.com>
Francisco Souza <f@souza.cc>
Frank Groeneveld <frank@frankgroeneveld.nl>
George Moura <gwmoura@gmail.com>
Grégoire Delattre <gregoire.delattre@gmail.com>
Guillermo Álvarez Fernández <guillermo@cientifico.net>
Harry Zhang <harryzhang@zju.edu.cn>
He Simei <hesimei@zju.edu.cn>
Ivan Mikushin <i.mikushin@gmail.com>
James Bardin <jbardin@litl.com>
James Nugent <james@jen20.com>
Jari Kolehmainen <jari.kolehmainen@digia.com>
Jason Wilder <jwilder@litl.com>
Jawher Moussa <jawher.moussa@gmail.com>
@ -52,22 +65,31 @@ Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Mitchell <jeffrey.mitchell@gmail.com>
Jeffrey Hulten <jhulten@gmail.com>
Jen Andre <jandre@gmail.com>
Jérôme Laurens <jeromelaurens@gmail.com>
Johan Euphrosine <proppy@google.com>
John Hughes <hughesj@visa.com>
Kamil Domanski <kamil@domanski.co>
Karan Misra <kidoman@gmail.com>
Ken Herner <chosenken@gmail.com>
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
Kyle Allan <kallan357@gmail.com>
Liron Levin <levinlir@gmail.com>
Lior Yankovich <lior@twistlock.com>
Liu Peng <vslene@gmail.com>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Lucas Clemente <lucas@clemente.io>
Lucas Weiblen <lucasweiblen@gmail.com>
Lyon Hill <lyondhill@gmail.com>
Mantas Matelis <mmatelis@coursera.org>
Martin Sweeney <martin@sweeney.io>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
Michael Schmatz <michaelschmatz@gmail.com>
Michal Fojtik <mfojtik@redhat.com>
Mike Dillon <mike.dillon@synctree.com>
Mrunal Patel <mrunalp@gmail.com>
Nate Jones <nate@endot.org>
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
Nicholas Van Wiggeren <nvanwiggeren@digitalocean.com>
Nick Ethier <ncethier@gmail.com>
Omeid Matten <public@omeid.me>
Orivej Desh <orivej@gmx.fr>
@ -81,9 +103,11 @@ Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
Rafe Colton <rafael.colton@gmail.com>
Rob Miller <rob@kalistra.com>
Robert Williamson <williamson.robert@gmail.com>
Roman Khlystik <roman.khlystik@gmail.com>
Salvador Gironès <salvadorgirones@gmail.com>
Sam Rijs <srijs@airpost.net>
Sami Wagiaalla <swagiaal@redhat.com>
Samuel Archambault <sarchambault@lapresse.ca>
Samuel Karp <skarp@amazon.com>
Silas Sewell <silas@sewell.org>
Simon Eskildsen <sirup@sirupsen.com>
@ -95,7 +119,9 @@ Summer Mousa <smousa@zenoss.com>
Sunjin Lee <styner32@gmail.com>
Tarsis Azevedo <tarsis@corp.globo.com>
Tim Schindler <tim@catalyst-zero.com>
Timothy St. Clair <tstclair@redhat.com>
Tobi Knaup <tobi@mesosphere.io>
Tom Wilkie <tom.wilkie@gmail.com>
Tonic <tonicbupt@gmail.com>
ttyh061 <ttyh061@gmail.com>
Victor Marmol <vmarmol@google.com>

github.com/fsouza/go-dockerclient/LICENSE

@ -1,4 +1,4 @@
Copyright (c) 2015, go-dockerclient authors
Copyright (c) 2016, go-dockerclient authors
All rights reserved.
Redistribution and use in source and binary forms, with or without

github.com/fsouza/go-dockerclient/Makefile

@ -11,8 +11,7 @@
cov \
clean
SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
PKGS = ./. ./testing
PKGS = . ./testing
all: test
@ -22,23 +21,40 @@ vendor:
lint:
@ go get -v github.com/golang/lint/golint
$(foreach file,$(SRCS),golint $(file) || exit;)
@for file in $$(git ls-files '*.go' | grep -v 'external/'); do \
export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
done; \
exit $${status:-0}
vet:
@-go get -v golang.org/x/tools/cmd/vet
$(foreach pkg,$(PKGS),go vet $(pkg);)
fmt:
gofmt -w $(SRCS)
gofmt -s -w $(PKGS)
fmtcheck:
$(foreach file,$(SRCS),gofmt -d $(file);)
@ export output=$$(gofmt -s -d $(PKGS)); \
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
exit $${status:-0}
prepare_docker:
sudo stop docker || true
sudo rm -rf /var/lib/docker
sudo rm -f `which docker`
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-get update
sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
pretest: lint vet fmtcheck
test: pretest
gotest:
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
test: pretest gotest
integration:
go test -tags docker_integration -run TestIntegration -v
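The new gotest/integration split works through Go build tags: the integration target only compiles files carrying the docker_integration tag, and -run TestIntegration further narrows execution to matching test names. A sketch of what such a guarded test file could look like (the package and test body are illustrative, not from the repository):

```go
// +build docker_integration

// Placed in a *_test.go file, this only compiles when the build tag is set,
// so the plain gotest target never touches it. Test body is illustrative.
package docker_test

import "testing"

func TestIntegrationSmoke(t *testing.T) {
	// A real test would dial the daemon here, as the integration target
	// arranges by exporting DOCKER_HOST before running go test.
	t.Log("selected by: go test -tags docker_integration -run TestIntegration -v")
}
```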

github.com/fsouza/go-dockerclient/README.markdown

@ -1,18 +1,17 @@
# go-dockerclient
[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/).
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
This package also provides support for docker's network API, which is a simple
passthrough to the libnetwork remote API. Note that docker's network API is
only available in docker 1.8 and above, and only enabled in docker if
DOCKER_EXPERIMENTAL is defined during the docker build process.
For more details, check the [remote API documentation](http://docs.docker.com/en/latest/reference/api/docker_remote_api/).
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
## Vendoring

github.com/fsouza/go-dockerclient/auth.go

@ -82,10 +82,12 @@ func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
buf.ReadFrom(r)
byteData := buf.Bytes()
var confsWrapper map[string]map[string]dockerConfig
confsWrapper := struct {
Auths map[string]dockerConfig `json:"auths"`
}{}
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
if confs, ok := confsWrapper["auths"]; ok {
return confs, nil
if len(confsWrapper.Auths) > 0 {
return confsWrapper.Auths, nil
}
}
@ -106,7 +108,7 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
if err != nil {
return nil, err
}
userpass := strings.Split(string(data), ":")
userpass := strings.SplitN(string(data), ":", 2)
if len(userpass) != 2 {
return nil, ErrCannotParseDockercfg
}
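The Split to SplitN change above is a correctness fix: dockercfg credentials decode to "user:password", and nothing stops a password from containing colons of its own. SplitN with a limit of 2 cuts only at the first colon, so such passwords survive. A small illustration:

```go
// Why Split -> SplitN matters: dockercfg auth decodes to "user:password",
// and passwords may legally contain colons. SplitN(s, ":", 2) splits only
// at the first colon, so the password survives intact.
package main

import (
	"fmt"
	"strings"
)

func main() {
	decoded := "alice:s3cret:with:colons"

	old := strings.Split(decoded, ":")
	fmt.Println(len(old)) // 4, so the old code returned ErrCannotParseDockercfg

	userpass := strings.SplitN(decoded, ":", 2)
	fmt.Println(userpass[0]) // alice
	fmt.Println(userpass[1]) // s3cret:with:colons
}
```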

github.com/fsouza/go-dockerclient/client.go

@ -32,6 +32,7 @@ import (
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
)
const userAgent = "go-dockerclient"
@ -59,7 +60,8 @@ func NewAPIVersion(input string) (APIVersion, error) {
if !strings.Contains(input, ".") {
return nil, fmt.Errorf("Unable to parse version %q", input)
}
arr := strings.Split(input, ".")
raw := strings.Split(input, "-")
arr := strings.Split(raw[0], ".")
ret := make(APIVersion, len(arr))
var err error
for i, val := range arr {
@ -191,7 +193,7 @@ func NewVersionedClient(endpoint string, apiVersionString string) (*Client, erro
}
}
return &Client{
HTTPClient: &http.Client{},
HTTPClient: cleanhttp.DefaultClient(),
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
@ -294,9 +296,8 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
}
tlsConfig.RootCAs = caPool
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
}
tr := cleanhttp.DefaultTransport()
tr.TLSClientConfig = tlsConfig
if err != nil {
return nil, err
}
@ -554,32 +555,41 @@ type hijackOptions struct {
data interface{}
}
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error {
// CloseWaiter is an interface with methods for closing the underlying resource
// and then waiting for it to finish processing.
type CloseWaiter interface {
io.Closer
Wait() error
}
type waiterFunc func() error
func (w waiterFunc) Wait() error { return w() }
type closerFunc func() error
func (c closerFunc) Close() error { return c() }
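CloseWaiter is what makes the hijacked stream non-blocking for callers: they can abandon it (Close) or block until it drains (Wait) from whatever goroutine they choose, instead of being stuck inside hijack itself. A sketch of consuming one through AttachToContainerNonBlocking, which is added to container.go later in this change (the socket path and container ID are illustrative, and the container is assumed to be running):

```go
// Sketch of consuming a CloseWaiter returned by the non-blocking attach.
package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	cw, err := client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
		Container:    os.Args[1],
		OutputStream: os.Stdout,
		Stdout:       true,
		Stream:       true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cw.Close() // Close abandons the stream early if Wait is skipped
	if err := cw.Wait(); err != nil {
		log.Println("attach ended:", err)
	}
}
```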
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return err
return nil, err
}
}
var params io.Reader
if hijackOptions.data != nil {
buf, err := json.Marshal(hijackOptions.data)
if err != nil {
return err
return nil, err
}
params = bytes.NewBuffer(buf)
}
if hijackOptions.stdout == nil {
hijackOptions.stdout = ioutil.Discard
}
if hijackOptions.stderr == nil {
hijackOptions.stderr = ioutil.Discard
}
req, err := http.NewRequest(method, c.getURL(path), params)
if err != nil {
return err
return nil, err
}
req.Header.Set("Content-Type", "plain/text")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
protocol := c.endpointURL.Scheme
@ -592,58 +602,103 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error
if c.TLSConfig != nil && protocol != "unix" {
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
if err != nil {
return err
return nil, err
}
} else {
dial, err = c.Dialer.Dial(protocol, address)
if err != nil {
return err
return nil, err
}
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
clientconn.Do(req)
if hijackOptions.success != nil {
hijackOptions.success <- struct{}{}
<-hijackOptions.success
}
rwc, br := clientconn.Hijack()
defer rwc.Close()
errChanOut := make(chan error, 1)
errChanIn := make(chan error, 1)
errs := make(chan error)
quit := make(chan struct{})
go func() {
defer func() {
if hijackOptions.in != nil {
if closer, ok := hijackOptions.in.(io.Closer); ok {
errChanIn <- nil
closer.Close()
}
}
}()
var err error
if hijackOptions.setRawTerminal {
_, err = io.Copy(hijackOptions.stdout, br)
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
clientconn.Do(req)
if hijackOptions.success != nil {
hijackOptions.success <- struct{}{}
<-hijackOptions.success
}
rwc, br := clientconn.Hijack()
defer rwc.Close()
errChanOut := make(chan error, 1)
errChanIn := make(chan error, 1)
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
close(errChanOut)
} else {
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
// Otherwise, if the only stream you care about is stdin, your attach session
// will "hang" until the container terminates, even though you're not reading
// stdout/stderr
if hijackOptions.stdout == nil {
hijackOptions.stdout = ioutil.Discard
}
if hijackOptions.stderr == nil {
hijackOptions.stderr = ioutil.Discard
}
go func() {
defer func() {
if hijackOptions.in != nil {
if closer, ok := hijackOptions.in.(io.Closer); ok {
closer.Close()
}
errChanIn <- nil
}
}()
var err error
if hijackOptions.setRawTerminal {
_, err = io.Copy(hijackOptions.stdout, br)
} else {
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
}
errChanOut <- err
}()
}
errChanOut <- err
}()
go func() {
var err error
if hijackOptions.in != nil {
_, err = io.Copy(rwc, hijackOptions.in)
go func() {
var err error
if hijackOptions.in != nil {
_, err = io.Copy(rwc, hijackOptions.in)
}
errChanIn <- err
rwc.(interface {
CloseWrite() error
}).CloseWrite()
}()
var errIn error
select {
case errIn = <-errChanIn:
case <-quit:
return
}
var errOut error
select {
case errOut = <-errChanOut:
case <-quit:
return
}
if errIn != nil {
errs <- errIn
} else {
errs <- errOut
}
errChanIn <- err
rwc.(interface {
CloseWrite() error
}).CloseWrite()
}()
errIn := <-errChanIn
errOut := <-errChanOut
if errIn != nil {
return errIn
}
return errOut
return struct {
closerFunc
waiterFunc
}{
closerFunc(func() error { close(quit); return nil }),
waiterFunc(func() error { return <-errs }),
}, nil
}
func (c *Client) getURL(path string) string {
@ -678,13 +733,13 @@ func (c *Client) unixClient() *http.Client {
return c.unixHTTPClient
}
socketPath := c.endpointURL.Path
c.unixHTTPClient = &http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return c.Dialer.Dial("unix", socketPath)
},
tr := &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return c.Dialer.Dial("unix", socketPath)
},
}
cleanhttp.SetTransportFinalizer(tr)
c.unixHTTPClient = &http.Client{Transport: tr}
return c.unixHTTPClient
}
@ -783,6 +838,9 @@ func (e *Error) Error() string {
}
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
if endpoint != "" && !strings.Contains(endpoint, "://") {
endpoint = "tcp://" + endpoint
}
u, err := url.Parse(endpoint)
if err != nil {
return nil, ErrInvalidEndpoint
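Two small client.go behaviors above are easy to miss: NewAPIVersion now tolerates pre-release suffixes by splitting on "-" before parsing the dotted numbers, and parseEndpoint now defaults bare endpoints like "localhost:2375" to tcp://. A sketch of the version parsing, using the comparison helpers defined on APIVersion:

```go
// Sketch of the pre-release handling: the split on "-" means versions such
// as "1.22-rc2" now parse instead of failing.
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	v, err := docker.NewAPIVersion("1.22-rc2") // previously failed to parse
	if err != nil {
		log.Fatal(err)
	}
	min, _ := docker.NewAPIVersion("1.21")
	fmt.Println(v, v.GreaterThanOrEqualTo(min)) // 1.22 true
}
```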

github.com/fsouza/go-dockerclient/container.go

@ -52,7 +52,14 @@ type APIContainers struct {
SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
Names []string `json:"Names,omitempty" yaml:"Names,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels, omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
Networks NetworkList `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
}
// NetworkList encapsulates a map of networks, as returned by the Docker API in
// ListContainers.
type NetworkList struct {
Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty"`
}
// ListContainers returns a slice of containers matching the given criteria.
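The new Networks field surfaces the per-network settings the daemon began reporting once a container could join multiple networks. A sketch of walking them from a ListContainers result (the endpoint is illustrative):

```go
// Sketch of reading the new per-network settings off ListContainers results.
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		for name, net := range c.Networks.Networks {
			fmt.Printf("%s %s: ip=%s mac=%s\n", c.ID[:12], name, net.IPAddress, net.MacAddress)
		}
	}
}
```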
@ -125,25 +132,39 @@ type PortBinding struct {
// and its value as found in NetworkSettings should always be nil
type PortMapping map[string]string
// ContainerNetwork represents the networking settings of a container per network.
type ContainerNetwork struct {
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
}
// NetworkSettings contains network-related information about a container
type NetworkSettings struct {
IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"`
GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"`
LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"`
SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"`
SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"`
Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty"`
IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"`
GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"`
LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"`
SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"`
SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"`
}
// PortMappingAPI translates the port mappings as contained in NetworkSettings
@ -154,8 +175,8 @@ func (settings *NetworkSettings) PortMappingAPI() []APIPort {
p, _ := parsePort(port.Port())
if len(bindings) == 0 {
mapping = append(mapping, APIPort{
PublicPort: int64(p),
Type: port.Proto(),
PrivatePort: int64(p),
Type: port.Proto(),
})
continue
}
@ -185,36 +206,39 @@ func parsePort(rawPort string) (int, error) {
// Config does not contain the options that are specific to starting a container on a
// given host. Those are contained in HostConfig
type Config struct {
Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
User string `json:"User,omitempty" yaml:"User,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
Cmd []string `json:"Cmd" yaml:"Cmd"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
User string `json:"User,omitempty" yaml:"User,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
Cmd []string `json:"Cmd" yaml:"Cmd"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
}
// Mount represents a mount point in the container.
@ -222,8 +246,10 @@ type Config struct {
// It has been added in the version 1.20 of the Docker API, available since
// Docker 1.8.
type Mount struct {
Name string
Source string
Destination string
Driver string
Mode string
RW bool
}
@ -290,6 +316,34 @@ type Container struct {
AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
}
// UpdateContainerOptions specify parameters to the UpdateContainer function.
//
// See https://goo.gl/Y6fXUy for more details.
type UpdateContainerOptions struct {
BlkioWeight int `json:"BlkioWeight"`
CPUShares int `json:"CpuShares"`
CPUPeriod int `json:"CpuPeriod"`
CPUQuota int `json:"CpuQuota"`
CpusetCpus string `json:"CpusetCpus"`
CpusetMems string `json:"CpusetMems"`
Memory int `json:"Memory"`
MemorySwap int `json:"MemorySwap"`
MemoryReservation int `json:"MemoryReservation"`
KernelMemory int `json:"KernelMemory"`
}
// UpdateContainer updates the container at ID with the options
//
// See https://goo.gl/Y6fXUy for more details.
func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{data: opts, forceJSON: true})
if err != nil {
return err
}
defer resp.Body.Close()
return nil
}
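UpdateContainer wraps the POST /containers/{id}/update endpoint, which lets resource limits be changed on a running container without recreating it. A sketch of a call, with illustrative values (Memory is in bytes; a MemorySwap of -1 means unlimited swap):

```go
// Illustrative resource update against a running container.
package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	err = client.UpdateContainer(os.Args[1], docker.UpdateContainerOptions{
		Memory:     512 * 1024 * 1024, // bytes
		MemorySwap: -1,                // -1: unlimited swap
		CPUShares:  512,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```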
// RenameContainerOptions specify parameters to the RenameContainer function.
//
// See https://goo.gl/laSOIy for more details.
@ -451,45 +505,71 @@ type Device struct {
CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"`
}
// BlockWeight represents a relative device weight for an individual device inside
// of a container
//
// See https://goo.gl/FSdP0H for more details.
type BlockWeight struct {
Path string `json:"Path,omitempty"`
Weight string `json:"Weight,omitempty"`
}
// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device
// inside of a container
//
// See https://goo.gl/FSdP0H for more details.
type BlockLimit struct {
Path string `json:"Path,omitempty"`
Rate string `json:"Rate,omitempty"`
}
// HostConfig contains the container options related to starting a container on
// a given host
type HostConfig struct {
Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"`
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"`
OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"`
CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"`
CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"`
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty"`
DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"`
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"`
OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"`
CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"`
CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"`
BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice"`
BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps"`
BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps"`
BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps"`
BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps"`
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"`
}
// StartContainer starts a container, returning an error in case of failure.
@ -617,17 +697,9 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
//
// See https://goo.gl/GNmLHb for more details.
type Stats struct {
Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
Network struct {
RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
} `json:"network,omitempty" yaml:"network,omitempty"`
Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"`
Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"`
MemoryStats struct {
Stats struct {
TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"`
@ -659,6 +731,8 @@ type Stats struct {
Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty"`
Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty"`
} `json:"stats,omitempty" yaml:"stats,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`
@ -679,6 +753,18 @@ type Stats struct {
PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
}
// NetworkStats is a stats entry for network stats
type NetworkStats struct {
RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
}
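With this restructuring, per-interface counters arrive under the new Networks map, keyed by interface name, while the old aggregate Network field keeps working against older daemons. A sketch of streaming them (socket path and container ID are illustrative):

```go
// Sketch of streaming the restructured stats and reading per-interface counters.
package main

import (
	"fmt"
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	stats := make(chan *docker.Stats)
	go func() {
		// Stats blocks, sending on the channel until the stream ends.
		if err := client.Stats(docker.StatsOptions{ID: os.Args[1], Stats: stats, Stream: true}); err != nil {
			log.Fatal(err)
		}
	}()
	for s := range stats {
		for iface, n := range s.Networks {
			fmt.Printf("%s: rx=%d tx=%d\n", iface, n.RxBytes, n.TxBytes)
		}
	}
}
```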
// CPUStats is a stats entry for cpu stats
type CPUStats struct {
CPUUsage struct {
@ -953,7 +1039,7 @@ type CommitContainerOptions struct {
Container string
Repository string `qs:"repo"`
Tag string
Message string `qs:"m"`
Message string `qs:"comment"`
Author string
Run *Config `qs:"-"`
}
@ -1018,8 +1104,20 @@ type AttachToContainerOptions struct {
//
// See https://goo.gl/NKpkFk for more details.
func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
cw, err := c.AttachToContainerNonBlocking(opts)
if err != nil {
return err
}
return cw.Wait()
}
// AttachToContainerNonBlocking attaches to a container, using the given options.
// This function does not block.
//
// See https://goo.gl/NKpkFk for more details.
func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
if opts.Container == "" {
return &NoSuchContainer{ID: opts.Container}
return nil, &NoSuchContainer{ID: opts.Container}
}
path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
return c.hijack("POST", path, hijackOptions{

github.com/fsouza/go-dockerclient/event.go

@ -18,12 +18,38 @@ import (
"time"
)
// APIEvents represents an event returned by the API.
// APIEvents represents events coming from the Docker API
// The fields in the Docker API changed in API version 1.22, and
// events for more than images and containers are now fired off.
// To maintain forward and backward compatibility, go-dockerclient
// replicates the event in both the new and old format as faithfully as possible.
//
// For events that only exist in 1.22 in later, `Status` is filled in as
// `"Type:Action"` instead of just `Action` to allow for older clients to
// differentiate and not break if they rely on the pre-1.22 Status types.
//
// The transformEvent method can be consulted for more information about how
// events are translated from new/old API formats
type APIEvents struct {
Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
From string `json:"From,omitempty" yaml:"From,omitempty"`
Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"`
// New API Fields in 1.22
Action string `json:"action,omitempty"`
Type string `json:"type,omitempty"`
Actor APIActor `json:"actor,omitempty"`
// Old API fields for < 1.22
Status string `json:"status,omitempty"`
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
// Fields in both
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
}
// APIActor represents an actor that accomplishes something for an event
type APIActor struct {
ID string `json:"id,omitempty"`
Attributes map[string]string `json:"attributes,omitempty"`
}
type eventMonitoringState struct {
@ -52,6 +78,7 @@ var (
// EOFEvent is sent when the event listener receives an EOF error.
EOFEvent = &APIEvents{
Type: "EOF",
Status: "EOF",
}
)
@ -297,8 +324,47 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if !c.eventMonitor.isEnabled() {
return
}
transformEvent(&event)
eventChan <- &event
}
}(res, conn)
return nil
}
// transformEvent takes an event and determines what version it is from
// then populates both versions of the event
func transformEvent(event *APIEvents) {
// if event version is <= 1.21 there will be no Action and no Type
if event.Action == "" && event.Type == "" {
event.Action = event.Status
event.Actor.ID = event.ID
event.Actor.Attributes = map[string]string{}
switch event.Status {
case "delete", "import", "pull", "push", "tag", "untag":
event.Type = "image"
default:
event.Type = "container"
if event.From != "" {
event.Actor.Attributes["image"] = event.From
}
}
} else {
if event.Status == "" {
if event.Type == "image" || event.Type == "container" {
event.Status = event.Action
} else {
// Because just the Status has been overloaded with different Types
// if an event is not for an image or a container, we prepend the type
// to avoid problems for people relying on actions being only for
// images and containers
event.Status = event.Type + ":" + event.Action
}
}
if event.ID == "" {
event.ID = event.Actor.ID
}
if event.From == "" {
event.From = event.Actor.Attributes["image"]
}
}
}
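Seen from a listener, the dual-format guarantee means both views are always populated: pre-1.22 daemons fill Status/ID/From, 1.22+ daemons fill Type/Action/Actor, and transformEvent copies whichever half is missing. A sketch of consuming events (the socket path is illustrative):

```go
// Sketch of a listener that can rely on both the old and new event fields.
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	events := make(chan *docker.APIEvents)
	if err := client.AddEventListener(events); err != nil {
		log.Fatal(err)
	}
	for e := range events {
		// Both views are available regardless of daemon version.
		fmt.Printf("old: status=%s id=%s | new: %s:%s actor=%s\n",
			e.Status, e.ID, e.Type, e.Action, e.Actor.ID)
	}
}
```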

github.com/fsouza/go-dockerclient/exec.go

@ -83,8 +83,24 @@ type StartExecOptions struct {
//
// See https://goo.gl/iQCnto for more details
func (c *Client) StartExec(id string, opts StartExecOptions) error {
cw, err := c.StartExecNonBlocking(id, opts)
if err != nil {
return err
}
if cw != nil {
return cw.Wait()
}
return nil
}
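A sketch pairing CreateExec with the non-blocking start shown below; note that when the exec is started with Detach set, the returned CloseWaiter is nil, which is why the wrapper above checks for it (command, socket path, and container ID are illustrative):

```go
// Sketch of creating an exec instance and starting it non-blockingly.
package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	exec, err := client.CreateExec(docker.CreateExecOptions{
		Container:    os.Args[1],
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	cw, err := client.StartExecNonBlocking(exec.ID, docker.StartExecOptions{
		OutputStream: os.Stdout,
	})
	if err != nil {
		log.Fatal(err)
	}
	if cw != nil { // nil when the exec was started with Detach: true
		if err := cw.Wait(); err != nil {
			log.Fatal(err)
		}
	}
}
```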
// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
// true, it returns after starting the exec command. Otherwise, it sets up an
// interactive session with the exec command.
//
// See https://goo.gl/iQCnto for more details
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
if id == "" {
return &NoSuchExec{ID: id}
return nil, &NoSuchExec{ID: id}
}
path := fmt.Sprintf("/exec/%s/start", id)
@ -93,12 +109,12 @@ func (c *Client) StartExec(id string, opts StartExecOptions) error {
resp, err := c.do("POST", path, doOptions{data: opts})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchExec{ID: id}
return nil, &NoSuchExec{ID: id}
}
return err
return nil, err
}
defer resp.Body.Close()
return nil
return nil, nil
}
return c.hijack("POST", path, hijackOptions{

github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md

@ -1,26 +1,55 @@
# (Unreleased)
# 0.9.0 (Unreleased)
logrus/core: improve performance of text formatter by 40%
logrus/core: expose `LevelHooks` type
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
# 0.8.7
* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements
# 0.8.6
* hooks/raven: allow passing an initialized client
# 0.8.5
* logrus/core: revert #208
# 0.8.4
* formatter/text: fix data race (#218)
# 0.8.3
* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely
# 0.8.2
logrus: fix more Fatal family functions
* logrus: fix more Fatal family functions
# 0.8.1
logrus: fix not exiting on `Fatalf` and `Fatalln`
* logrus: fix not exiting on `Fatalf` and `Fatalln`
# 0.8.0
logrus: defaults to stderr instead of stdout
hooks/sentry: add special field for `*http.Request`
formatter/text: ignore Windows for colors
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors
# 0.7.3
formatter/\*: allow configuration of timestamp layout
* formatter/\*: allow configuration of timestamp layout
# 0.7.2
formatter/text: Add configuration option for time format (#158)
* formatter/text: Add configuration option for time format (#158)

github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md

@ -75,17 +75,12 @@ package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
@ -182,13 +177,16 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@ -198,25 +196,31 @@ func init() {
}
}
```
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
#### Level logging
@ -272,7 +276,7 @@ init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&logrus.JSONFormatter{})
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
@ -294,15 +298,16 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"})
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
```
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@ -315,7 +320,7 @@ type MyJSONFormatter struct {
log.SetFormatter(new(MyJSONFormatter))
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
@ -351,5 +356,10 @@ Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
[godoc]: https://godoc.org/github.com/Sirupsen/logrus

github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go (new)

@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus

github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go

@ -8,6 +8,9 @@ import (
"time"
)
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
@ -53,6 +56,11 @@ func (entry *Entry) String() (string, error) {
return reader.String(), err
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
@ -70,12 +78,14 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
@ -100,7 +110,7 @@ func (entry *Entry) log(level Level, msg string) {
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
panic(&entry)
}
}

View File

@ -48,6 +48,11 @@ func AddHook(hook Hook) {
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
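Putting the new ErrorKey and WithError additions above together, a hedged usage sketch (assuming the standard logrus import path):

package main

import (
	"errors"

	log "github.com/Sirupsen/logrus"
)

func main() {
	err := errors.New("the ice breaks!")
	// WithError attaches err under the "error" key (ErrorKey) before logging.
	log.WithError(err).Error("failed to send event")
}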

View File

@ -8,7 +8,7 @@ import (
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stdout`. You can also set this to
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
@ -53,7 +53,7 @@ func New() *Logger {
// Adds a field to the log entry; note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// Ff you want multiple fields, use `WithFields`.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
@ -64,6 +64,12 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)

View File

@ -74,7 +74,11 @@ const (
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
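The expanded var block above is the standard Go compile-time interface assertion idiom. A tiny illustration, with hypothetical Speaker and Dog names used only for this sketch:

package main

// Speaker and Dog are hypothetical; they only illustrate the idiom.
type Speaker interface{ Speak() string }

type Dog struct{}

func (Dog) Speak() string { return "woof" }

// Costs nothing at runtime, but fails to compile the moment
// Dog stops satisfying Speaker.
var _ Speaker = Dog{}

func main() {}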

View File

@ -1,20 +0,0 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

View File

@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd
// +build linux darwin freebsd openbsd netbsd dragonfly
package logrus
@ -12,9 +12,9 @@ import (
"unsafe"
)
// IsTerminal returns true if the given file descriptor is a terminal.
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0

View File

@ -1,7 +0,0 @@
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -0,0 +1,15 @@
// +build solaris
package logrus
import (
"os"
"github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}

View File

@ -18,9 +18,9 @@ var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if the given file descriptor is a terminal.
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0

View File

@ -73,17 +73,20 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys)
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
@ -93,7 +96,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
@ -111,11 +114,11 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
}
}

View File

@ -4,18 +4,22 @@ import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
)
var (
// EnvironmentVariableRegexp A regexp to validate correct environment variables
// Environment variables set by the user must have a name consisting solely of
// alphabetics, numerics, and underscores - the first of which must not be numeric.
EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
)
// ParseEnvFile Read in a line delimited file with environment variables enumerated
// ParseEnvFile reads a file with environment variables enumerated by lines
//
// ``Environment variable names used by the utilities in the Shell and
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
// letters, digits, and the '_' (underscore) from the characters defined in
// Portable Character Set and do not begin with a digit. *But*, other
// characters may be permitted by an implementation; applications shall
// tolerate the presence of such names.''
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
//
// As of #16585, it's up to the application inside docker to validate
// environment variables or not; that's why we just strip leading whitespace
// and nothing more.
func ParseEnvFile(filename string) ([]string, error) {
fh, err := os.Open(filename)
if err != nil {
@ -26,17 +30,18 @@ func ParseEnvFile(filename string) ([]string, error) {
lines := []string{}
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
line := scanner.Text()
// trim the line from all leading whitespace first
line := strings.TrimLeft(scanner.Text(), whiteSpaces)
// line is not empty, and not starting with '#'
if len(line) > 0 && !strings.HasPrefix(line, "#") {
data := strings.SplitN(line, "=", 2)
// trim the front of a variable, but nothing else
variable := strings.TrimLeft(data[0], whiteSpaces)
if !EnvironmentVariableRegexp.MatchString(variable) {
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)}
if strings.ContainsAny(variable, whiteSpaces) {
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
}
if len(data) > 1 {
// pass the value through, no trimming
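A quick illustration of the relaxed validation described above (leading whitespace stripped, comments skipped, values passed through untouched). This is a sketch, assuming the vendored import path used throughout this commit resolves for the consumer:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	f, err := ioutil.TempFile("", "envfile")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	// Leading whitespace is trimmed, "#" lines are skipped,
	// and values keep their whitespace untouched.
	f.WriteString("  FOO=bar\n# a comment\nSPACEY=  padded  \n")
	f.Close()

	vars, err := opts.ParseEnvFile(f.Name())
	fmt.Println(vars, err)
	// Expected under these assumptions:
	// [FOO=bar SPACEY=  padded  ] <nil>
}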

View File

@ -0,0 +1,146 @@
package opts
import (
"fmt"
"net"
"net/url"
"runtime"
"strconv"
"strings"
)
var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
// is not supplied. A better longer term solution would be to use a named
// pipe as the default on the Windows daemon.
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
// DefaultUnixSocket Path for the unix socket.
// Docker daemon by default always listens on the default unix socket
DefaultUnixSocket = "/var/run/docker.sock"
// DefaultTCPHost constant defines the default host string used by docker on Windows
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
)
// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
_, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
if err != nil {
return val, err
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
return val, nil
}
// ParseHost and set defaults for a Daemon host string
func ParseHost(defaultHost, val string) (string, error) {
host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
if err != nil {
return val, err
}
return host, nil
}
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
// Depending on the address specified, it will use the defaultTCPAddr or defaultUnixAddr
// defaultUnixAddr must be an absolute file path (no `unix://` prefix)
// defaultTCPAddr must be the full `tcp://host:port` form
func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
addr = strings.TrimSpace(addr)
if addr == "" {
if defaultAddr == defaultTLSHost {
return defaultTLSHost, nil
}
if runtime.GOOS != "windows" {
return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
}
return defaultTCPAddr, nil
}
addrParts := strings.Split(addr, "://")
if len(addrParts) == 1 {
addrParts = []string{"tcp", addrParts[0]}
}
switch addrParts[0] {
case "tcp":
return parseTCPAddr(addrParts[1], defaultTCPAddr)
case "unix":
return parseUnixAddr(addrParts[1], defaultUnixAddr)
case "fd":
return addr, nil
default:
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
}
// parseUnixAddr parses and validates that the specified address is a valid UNIX
// socket address. It returns a formatted UNIX socket address, either using the
// address parsed from addr, or the contents of defaultAddr if addr is a blank
// string.
func parseUnixAddr(addr string, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, "unix://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("unix://%s", addr), nil
}
// parseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d
// defaultAddr must be in the full `tcp://host:port` form
func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
if tryAddr == "" || tryAddr == "tcp://" {
return defaultAddr, nil
}
addr := strings.TrimPrefix(tryAddr, "tcp://")
if strings.Contains(addr, "://") || addr == "" {
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
}
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
if err != nil {
return "", err
}
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
// not 1.4. See https://github.com/golang/go/issues/12200 and
// https://github.com/golang/go/issues/6530.
if strings.HasSuffix(addr, "]:") {
addr += defaultPort
}
u, err := url.Parse("tcp://" + addr)
if err != nil {
return "", err
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
if host == "" {
host = defaultHost
}
if port == "" {
port = defaultPort
}
p, err := strconv.Atoi(port)
if err != nil && p == 0 {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
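Concretely, the parsers above normalize inputs as in the following sketch. Expected values are inferred from the code and assume a non-Windows build (where DefaultHTTPHost is "localhost" and the daemon defaults to a unix socket) and that the vendored package is importable:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	for _, in := range []string{
		"",                   // -> unix:///var/run/docker.sock
		"tcp://",             // -> tcp://localhost:2375 (DefaultTCPHost)
		"tcp://:2376",        // -> tcp://localhost:2376 (host filled from default)
		"unix:///tmp/d.sock", // -> unix:///tmp/d.sock
		"fd://",              // -> fd:// (passed through)
	} {
		out, err := opts.ParseHost("", in)
		fmt.Printf("%-20q -> %q %v\n", in, out, err)
	}
}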

View File

@ -4,4 +4,5 @@ package opts
import "fmt"
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)

View File

@ -2,6 +2,5 @@
package opts
import "fmt"
var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultHost constant defines the default host string used by docker on Windows
var DefaultHost = DefaultTCPHost

View File

@ -5,20 +5,25 @@ import (
"net"
)
// IpOpt type that hold an IP
type IpOpt struct {
// IPOpt holds an IP. It is used to store values from CLI flags.
type IPOpt struct {
*net.IP
}
func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
o := &IpOpt{
// NewIPOpt creates a new IPOpt from a reference net.IP and a
// string representation of an IP. If the string is not a valid
// IP it will fallback to the specified reference.
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
o := &IPOpt{
IP: ref,
}
o.Set(defaultVal)
return o
}
func (o *IpOpt) Set(val string) error {
// Set sets an IPv4 or IPv6 address from a given string. If the given
// string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error {
ip := net.ParseIP(val)
if ip == nil {
return fmt.Errorf("%s is not an ip address", val)
@ -27,7 +32,9 @@ func (o *IpOpt) Set(val string) error {
return nil
}
func (o *IpOpt) String() string {
// String returns the IP address stored in the IPOpt. If stored IP is a
// nil pointer, it returns an empty string.
func (o *IPOpt) String() string {
if *o.IP == nil {
return ""
}
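Because IPOpt implements both Set and String, it satisfies the standard library's flag.Value interface. A hedged usage sketch, again assuming the vendored import path:

package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	// NewIPOpt falls back to 127.0.0.1 when the flag is absent or unparseable.
	flag.Var(opts.NewIPOpt(&ip, "127.0.0.1"), "ip", "IP address to bind to")
	flag.Parse()
	fmt.Println("binding to", ip)
}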

View File

@ -4,41 +4,28 @@ import (
"fmt"
"net"
"os"
"path"
"regexp"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
DefaultHTTPHost = "127.0.0.1"
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp://
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
// is not supplied. A better longer term solution would be to use a named
// pipe as the default on the Windows daemon.
DefaultHTTPPort = 2375 // Default HTTP Port
// DefaultUnixSocket Path for the unix socket.
// Docker daemon by default always listens on the default unix socket
DefaultUnixSocket = "/var/run/docker.sock"
)
// ListOpts type that hold a list of values and a validation function.
// ListOpts holds a list of values and a validation function.
type ListOpts struct {
values *[]string
validator ValidatorFctType
}
// NewListOpts Create a new ListOpts with the specified validator.
// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
var values []string
return *NewListOptsRef(&values, validator)
}
// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
return &ListOpts{
values: values,
@ -64,7 +51,7 @@ func (opts *ListOpts) Set(value string) error {
return nil
}
// Delete remove the given element from the slice.
// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
for i, k := range *opts.values {
if k == key {
@ -76,7 +63,6 @@ func (opts *ListOpts) Delete(key string) {
// GetMap returns the content of values in a map in order to avoid
// duplicates.
// FIXME: can we remove this?
func (opts *ListOpts) GetMap() map[string]struct{} {
ret := make(map[string]struct{})
for _, k := range *opts.values {
@ -85,13 +71,22 @@ func (opts *ListOpts) GetMap() map[string]struct{} {
return ret
}
// GetAll returns the values' slice.
// FIXME: Can we remove this?
// GetAll returns the values of the slice.
func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// Get checks the existence of the given key.
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
if k == key {
@ -106,7 +101,7 @@ func (opts *ListOpts) Len() int {
return len((*opts.values))
}
//MapOpts type that holds a map of values and a validation function.
//MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
validator ValidatorFctType
@ -131,10 +126,16 @@ func (opts *MapOpts) Set(value string) error {
return nil
}
// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
return opts.values
}
func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
values = make(map[string]string)
@ -145,13 +146,13 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
}
}
// ValidatorFctType validator that return a validate string and/or an error
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType validator that return a validate list of string and/or an error
// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error.
type ValidatorFctListType func(val string) ([]string, error)
// ValidateAttach Validates that the specified string is a valid attach option.
// ValidateAttach validates that the specified string is a valid attach option.
func ValidateAttach(val string) (string, error) {
s := strings.ToLower(val)
for _, str := range []string{"stdin", "stdout", "stderr"} {
@ -162,87 +163,24 @@ func ValidateAttach(val string) (string, error) {
return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
// ValidateLink Validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
if _, _, err := parsers.ParseLink(val); err != nil {
return val, err
}
return val, nil
}
// ValidateDevice Validate a path for devices
// It will make sure 'val' is in the form:
// [host-dir:]container-path[:mode]
func ValidateDevice(val string) (string, error) {
return validatePath(val, false)
}
// ValidatePath Validate a path for volumes
// It will make sure 'val' is in the form:
// [host-dir:]container-path[:rw|ro]
// It will also validate the mount mode.
func ValidatePath(val string) (string, error) {
return validatePath(val, true)
}
func validatePath(val string, validateMountMode bool) (string, error) {
var containerPath string
var mode string
if strings.Count(val, ":") > 2 {
return val, fmt.Errorf("bad format for volumes: %s", val)
}
splited := strings.SplitN(val, ":", 3)
if splited[0] == "" {
return val, fmt.Errorf("bad format for volumes: %s", val)
}
switch len(splited) {
case 1:
containerPath = splited[0]
val = path.Clean(containerPath)
case 2:
if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid {
containerPath = splited[0]
mode = splited[1]
val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
} else {
containerPath = splited[1]
val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath))
}
case 3:
containerPath = splited[1]
mode = splited[2]
if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid {
return val, fmt.Errorf("bad mount mode specified : %s", mode)
}
val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode)
}
if !path.IsAbs(containerPath) {
return val, fmt.Errorf("%s is not an absolute path", containerPath)
}
return val, nil
}
// ValidateEnv Validate an environment variable and returns it
// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid.
// ValidateEnv validates an environment variable and returns it.
// If no value is specified, it returns the current value using os.Getenv.
//
// As with ParseEnvFile and related to #16585, environment variable names
// are not validated whatsoever; it's up to the application inside docker
// to validate them or not.
func ValidateEnv(val string) (string, error) {
arr := strings.Split(val, "=")
if len(arr) > 1 {
return val, nil
}
if !EnvironmentVariableRegexp.MatchString(arr[0]) {
return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
}
if !doesEnvExist(val) {
return val, nil
}
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
// ValidateIPAddress Validates an Ip address
// ValidateIPAddress validates an IP address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
if ip != nil {
@ -251,7 +189,7 @@ func ValidateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress Validates a MAC address
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
@ -260,8 +198,8 @@ func ValidateMACAddress(val string) (string, error) {
return val, nil
}
// ValidateDNSSearch Validates domain for resolvconf search configuration.
// A zero length domain is represented by .
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
if val = strings.Trim(val, " "); val == "." {
return val, nil
@ -280,8 +218,8 @@ func validateDomain(val string) (string, error) {
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateExtraHost Validate that the given string is a valid extrahost and returns it
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6)
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
// ExtraHosts are in the form of name:ip where the ip has to be a valid IP address (IPv4 or IPv6).
func ValidateExtraHost(val string) (string, error) {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
@ -294,8 +232,8 @@ func ValidateExtraHost(val string) (string, error) {
return val, nil
}
// ValidateLabel Validate that the given string is a valid label, and returns it
// Labels are in the form on key=value
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form of key=value.
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") < 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
@ -303,15 +241,6 @@ func ValidateLabel(val string) (string, error) {
return val, nil
}
// ValidateHost Validate that the given string is a valid host and returns it
func ValidateHost(val string) (string, error) {
host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
if err != nil {
return val, err
}
return host, nil
}
func doesEnvExist(name string) bool {
for _, entry := range os.Environ() {
parts := strings.SplitN(entry, "=", 2)
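A sketch of how these option types combine with the validators above, e.g. collecting a repeatable flag of IP addresses. It assumes the vendored import path resolves and that ListOpts carries a String method (present in upstream docker, though not shown in this hunk), which together with Set makes it a flag.Value:

package main

import (
	"flag"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// Each -dns value must pass ValidateIPAddress before it is stored.
	dns := opts.NewListOpts(opts.ValidateIPAddress)
	flag.Var(&dns, "dns", "DNS server to use (repeatable)")
	flag.Parse()
	fmt.Println("dns servers:", dns.GetAll())
}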

View File

@ -0,0 +1,6 @@
// +build !windows
package opts
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
const DefaultHTTPHost = "localhost"

View File

@ -0,0 +1,56 @@
package opts
// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP4.
//
// What was found is that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
// cache is flushed (ipconfig /flushdns), then the client will pause for
// exactly one second when connecting to the daemon for calls. For example
// using docker run windowsservercore cmd, the CLI will send a create followed
// by an attach. You see the delay between the attach finishing and the attach
// being seen by the daemon.
//
// Here's some daemon debug logs with additional debug spew put in. The
// AfterWriteJSON log is the very last thing the daemon does as part of the
// create call. The POST /attach is the second CLI call. Notice the second
// time gap.
//
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
// ... 1 second gap here....
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
//
// We have validated that working around this using the actual IPv4 localhost
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows TP4 build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"

View File

@ -1,47 +0,0 @@
package opts
import (
"fmt"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
)
type UlimitOpt struct {
values *map[string]*ulimit.Ulimit
}
func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
if ref == nil {
ref = &map[string]*ulimit.Ulimit{}
}
return &UlimitOpt{ref}
}
func (o *UlimitOpt) Set(val string) error {
l, err := ulimit.Parse(val)
if err != nil {
return err
}
(*o.values)[l.Name] = l
return nil
}
func (o *UlimitOpt) String() string {
var out []string
for _, v := range *o.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
var ulimits []*ulimit.Ulimit
for _, v := range *o.values {
ulimits = append(ulimits, v)
}
return ulimits
}

View File

@ -19,35 +19,50 @@ import (
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
type (
Archive io.ReadCloser
ArchiveReader io.Reader
Compression int
// Archive is a type of io.ReadCloser which has two interfaces: Read and Close.
Archive io.ReadCloser
// Reader is a type of io.Reader.
Reader io.Reader
// Compression is the state representing whether a stream is compressed or not.
Compression int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
}
// TarOptions wraps the tar options.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
Name string
IncludeSourceDir bool
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
// For each include when creating an archive, the included name will be
// replaced with the matching name from this map.
RebaseNames map[string]string
}
// Archiver allows the reuse of most utility functions of this package
// with a pluggable Untar function.
// with a pluggable Untar function. Also, to facilitate the passing of
// specific id mappings for untar, an archiver can be created with maps
// which will then be passed to Untar operations
Archiver struct {
Untar func(io.Reader, string, *TarOptions) error
Untar func(io.Reader, string, *TarOptions) error
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
}
// breakoutError is used to differentiate errors related to breaking out
@ -57,17 +72,29 @@ type (
)
var (
// ErrNotImplemented is the error returned when a function is not implemented.
ErrNotImplemented = errors.New("Function not implemented")
defaultArchiver = &Archiver{Untar}
defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)
const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)
const (
// Uncompressed represents the uncompressed state.
Uncompressed Compression = iota
// Bzip2 is bzip2 compression algorithm.
Bzip2
// Gzip is gzip compression algorithm.
Gzip
// Xz is xz compression algorithm.
Xz
)
// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
compression := DetectCompression(header)
if compression != Uncompressed {
@ -78,6 +105,24 @@ func IsArchive(header []byte) bool {
return err == nil
}
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
file, err := os.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}
// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
@ -95,17 +140,24 @@ func DetectCompression(source []byte) Compression {
return Uncompressed
}
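DetectCompression works purely on magic bytes, comparing the head of the stream against each format's signature. A self-contained sketch of the same check (constants as declared above; the bzip2/gzip/xz signatures are taken from the respective format specs):

package main

import (
	"bytes"
	"fmt"
)

type Compression int

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

// detect mirrors DetectCompression: compare the first bytes of the
// source against each format's magic number.
func detect(source []byte) Compression {
	for compression, magic := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},             // "BZh"
		Gzip:  {0x1F, 0x8B, 0x08},             // gzip, deflate method
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A}, // 0xFD "7zXZ"
	} {
		if len(source) >= len(magic) && bytes.Equal(magic, source[:len(magic)]) {
			return compression
		}
	}
	return Uncompressed
}

func main() {
	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // 2 (Gzip)
	fmt.Println(detect([]byte("plain tar header")))     // 0 (Uncompressed)
}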
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
args := []string{"xz", "-d", "-c", "-q"}
return CmdStream(exec.Command(args[0], args[1:]...), archive)
return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
if err != nil {
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
// that results in an io.EOF from the Peek() call. So, in those
// cases we'll just treat it as a non-compressed stream and
// that means just create an empty layer.
// See Issue 18170
return nil, err
}
@ -126,17 +178,21 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
return readBufWrapper, nil
case Xz:
xzReader, err := xzDecompress(buf)
xzReader, chdone, err := xzDecompress(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return readBufWrapper, nil
return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
<-chdone
return readBufWrapper.Close()
}), nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
@ -157,6 +213,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
}
}
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
@ -177,6 +234,8 @@ type tarAppender struct {
// for hardlink mapping
SeenFiles map[uint64]string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
}
// canonicalTarName provides a platform-independent and consistent posix-style
@ -219,14 +278,14 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
hdr.Name = name
nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
if err != nil {
return err
}
// if it's a regular file and has more than 1 link,
// if it's not a directory and has more than 1 link,
// it's hardlinked, so set the type flag accordingly
if fi.Mode().IsRegular() && nlink > 1 {
if !fi.IsDir() && hasHardlinks(fi) {
// a link should have a name that it links too
// and that linked name should be first in the tar archive
if oldpath, ok := ta.SeenFiles[inode]; ok {
@ -244,6 +303,26 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Xattrs["security.capability"] = string(capability)
}
//handle re-mapping container ID mappings back to host ID mappings before
//writing tar headers/files. We skip whiteout files because they were written
//by the kernel and already have proper ownership relative to the host
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
uid, gid, err := getFileUIDGID(fi.Sys())
if err != nil {
return err
}
xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
if err != nil {
return err
}
xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
hdr.Gid = xGID
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
@ -358,19 +437,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return err
}
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm
aTime := hdr.AccessTime
if aTime.Before(hdr.ModTime) {
// Last access time should never be before last modified time.
aTime = hdr.ModTime
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
@ -388,6 +473,10 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
if err != nil {
@ -406,6 +495,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
}
defer func() {
@ -454,11 +545,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
seen := make(map[string]bool)
var renamedRelFilePath string // For when tar.Options.Name is set
for _, include := range options.IncludeFiles {
// We can't use filepath.Join(srcPath, include) because this will
// clean away a trailing "." or "/" which may be important.
walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
@ -503,14 +593,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
seen[relFilePath] = true
// TODO Windows: Verify if this needs to be os.Pathseparator
// Rename the base resource
if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
renamedRelFilePath = relFilePath
}
// Set this to make sure the items underneath also get renamed
if options.Name != "" {
relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
// Rename the base resource.
if rebaseName != "" {
var replacement string
if rebaseName != string(filepath.Separator) {
// Special case the root directory to replace with an
// empty string instead so that we don't end up with
// double slashes in the paths.
replacement = rebaseName
}
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
@ -524,12 +617,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return pipeReader, nil
}
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return err
}
// Iterate through the files in the archive.
loop:
@ -607,6 +705,28 @@ loop:
}
trBuf.Reset(tr)
// if the options contain a uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if hdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
}
if hdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
if err != nil {
return err
}
hdr.Gid = xGID
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
@ -620,8 +740,8 @@ loop:
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
}
@ -637,7 +757,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
@ -657,7 +777,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
options.ExcludePatterns = []string{}
}
var r io.Reader = tarArchive
r := tarArchive
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
if err != nil {
@ -670,6 +790,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
return Unpack(r, dest, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
logrus.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
@ -677,7 +799,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
return err
}
defer archive.Close()
return archiver.Untar(archive, dst, nil)
var options *TarOptions
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
options = &TarOptions{
UIDMaps: archiver.UIDMaps,
GIDMaps: archiver.GIDMaps,
}
}
return archiver.Untar(archive, dst, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@ -686,16 +816,21 @@ func TarUntar(src, dst string) error {
return defaultArchiver.TarUntar(src, dst)
}
// UntarPath untar a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
archive, err := os.Open(src)
if err != nil {
return err
}
defer archive.Close()
if err := archiver.Untar(archive, dst, nil); err != nil {
return err
var options *TarOptions
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
options = &TarOptions{
UIDMaps: archiver.UIDMaps,
GIDMaps: archiver.GIDMaps,
}
}
return nil
return archiver.Untar(archive, dst, options)
}
// UntarPath is a convenience function which looks for an archive
@ -704,6 +839,10 @@ func UntarPath(src, dst string) error {
return defaultArchiver.UntarPath(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
srcSt, err := os.Stat(src)
if err != nil {
@ -714,7 +853,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
if err := system.MkdirAll(dst, 0755); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@ -729,6 +868,9 @@ func CopyWithTar(src, dst string) error {
return defaultArchiver.CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
@ -746,7 +888,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
return err
}
@ -767,6 +909,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
if err != nil {
return err
}
// only perform mapping if the file being copied isn't already owned by the
// uid or gid of the remapped root in the container
if remappedRootUID != hdr.Uid {
xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
}
if remappedRootGID != hdr.Gid {
xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
if err != nil {
return err
}
hdr.Gid = xGID
}
tw := tar.NewWriter(w)
defer tw.Close()
if err := tw.WriteHeader(hdr); err != nil {
@ -782,7 +946,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
err = er
}
}()
return archiver.Untar(r, filepath.Dir(dst), nil)
err = archiver.Untar(r, filepath.Dir(dst), nil)
if err != nil {
r.CloseWithError(err)
}
return err
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
@ -797,57 +966,33 @@ func CopyFileWithTar(src, dst string) (err error) {
return defaultArchiver.CopyFileWithTar(src, dst)
}
// CmdStream executes a command, and returns its stdout as a stream.
// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
if input != nil {
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
// Write stdin if any
go func() {
io.Copy(stdin, input)
stdin.Close()
}()
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
chdone := make(chan struct{})
cmd.Stdin = input
pipeR, pipeW := io.Pipe()
errChan := make(chan []byte)
// Collect stderr, we will use it in case of an error
go func() {
errText, e := ioutil.ReadAll(stderr)
if e != nil {
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
}
errChan <- errText
}()
cmd.Stdout = pipeW
var errBuf bytes.Buffer
cmd.Stderr = &errBuf
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, nil, err
}
// Copy stdout to the returned pipe
go func() {
_, err := io.Copy(pipeW, stdout)
if err != nil {
pipeW.CloseWithError(err)
}
errText := <-errChan
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
} else {
pipeW.Close()
}
close(chdone)
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
return pipeR, nil
return pipeR, chdone, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
@ -872,6 +1017,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
return &TempArchive{File: f, Size: size}, nil
}
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
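Pulling the pieces of this file together, a round-trip through TarWithOptions and Untar looks roughly like the following sketch (vendored import path assumed; temp-dir error handling abbreviated):

package main

import (
	"io/ioutil"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	src, _ := ioutil.TempDir("", "src")
	dst, _ := ioutil.TempDir("", "dst")
	ioutil.WriteFile(src+"/hello.txt", []byte("hi\n"), 0644)

	// Tar the source directory with gzip compression...
	rdr, err := archive.TarWithOptions(src, &archive.TarOptions{
		Compression: archive.Gzip,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()

	// ...and unpack it into dst; Untar decompresses transparently.
	if err := archive.Untar(rdr, dst, nil); err != nil {
		log.Fatal(err)
	}
}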

View File

@ -6,11 +6,26 @@ import (
"archive/tar"
"errors"
"os"
"path/filepath"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// fixVolumePathPrefix does platform-specific processing to ensure that if
// the path being passed in is not in a volume path format, it is converted to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return srcPath + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
@ -25,7 +40,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
@ -33,10 +48,9 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
nlink = uint32(s.Nlink)
inode = uint64(s.Ino)
// Currently go does not fil in the major/minors
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
@ -46,6 +60,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
}
return int(s.Uid), int(s.Gid), nil
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}

View File

@ -6,10 +6,25 @@ import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)
// canonicalTarNameForPath returns platform-specific filepath
// fixVolumePathPrefix does platform-specific processing to ensure that if
// the path being passed in is not in a volume path format, it is converted to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
@ -34,7 +49,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
@ -48,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (int, int, error) {
// no notion of file ownership mapping yet on Windows
return 0, 0, nil
}

View File

@ -14,34 +14,46 @@ import (
"time"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change; it wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be modify, add, or delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
var kind string
switch change.Kind {
case ChangeModify:
kind = "C"
case ChangeAdd:
kind = "A"
case ChangeDelete:
kind = "D"
}
return fmt.Sprintf("%s %s", kind, change.Path)
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
@ -94,7 +106,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
}
// Skip AUFS metadata
if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
}
@ -105,8 +117,8 @@ func Changes(layers []string, rw string) ([]Change, error) {
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
originalFile := file[len(".wh."):]
if strings.HasPrefix(file, WhiteoutPrefix) {
originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
@ -138,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directoriy in the rw and ro layers are all equal.
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
@ -161,20 +173,22 @@ func Changes(layers []string, rw string) ([]Change, error) {
return changes, nil
}
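A hedged sketch of this diff API in use: compare two directory trees and print each change with its one-letter kind (C/A/D, per the String methods above). The layer paths here are hypothetical, and the vendored import path is assumed to resolve:

package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// ChangesDirs compares a new layer directory against an old one.
	changes, err := archive.ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "A /hello.txt" or "D /removed.txt"
	}
}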
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.Stat_t
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
func (root *FileInfo) LookUp(path string) *FileInfo {
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := root
parent := info
if path == string(os.PathSeparator) {
return root
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
@ -275,6 +289,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
}
// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
@ -316,13 +331,29 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var size int64
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, _ := os.Lstat(file)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
size += fileInfo.Size()
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
@@ -330,13 +361,15 @@ func ChangesSize(newDir string, changes []Change) int64 {
}
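// A usage sketch (hypothetical paths, assuming the package above): entries
// that are hardlinks to the same inode contribute their size only once.
func exampleChangesSize() {
    changes := []Change{
        {Path: "/a", Kind: ChangeAdd},
        {Path: "/b", Kind: ChangeAdd},
    }
    // On Unix, if /a and /b under the new dir are hardlinks to one 1 MiB
    // inode, this reports about 1 MiB rather than 2 MiB, because
    // getIno/hasHardlinks deduplicate by inode number.
    fmt.Println(ChangesSize("/some/newdir", changes))
}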
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change) (Archive, error) {
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: uidMaps,
GIDMaps: gidMaps,
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
@@ -351,7 +384,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],


@@ -3,16 +3,17 @@
package archive
import (
"os"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.Uid() != newStat.Uid() ||
oldStat.Gid() != newStat.Gid() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
@@ -25,3 +26,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}


@@ -1,10 +1,12 @@
package archive
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.ModTime() != newStat.ModTime() ||
@@ -18,3 +20,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}


@@ -6,11 +6,11 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// Errors used or returned by this file.
@@ -29,8 +29,12 @@ var (
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
if !HasTrailingPathSeparator(cleanedPath) {
// Ensure paths are in platform semantics
cleanedPath = normalizePath(cleanedPath)
originalPath = normalizePath(originalPath)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
@@ -38,60 +42,60 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
cleanedPath += "."
}
if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
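// A quick sketch of the intended round-trip on Unix (assuming the package
// above): filepath.Clean drops a trailing "/." or "/", and this helper
// restores it.
func examplePreserveTrailing() {
    orig := "dir/."
    fmt.Println(PreserveTrailingDotOrSeparator(filepath.Clean(orig), orig)) // "dir/."
    orig = "dir/"
    fmt.Println(PreserveTrailingDotOrSeparator(filepath.Clean(orig), orig)) // "dir/"
}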
// AssertsDirectory returns whether the given path is
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func AssertsDirectory(path string) bool {
return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
func assertsDirectory(path string) bool {
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}
// HasTrailingPathSeparator returns whether the given
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func HasTrailingPathSeparator(path string) bool {
func hasTrailingPathSeparator(path string) bool {
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}
// SpecifiesCurrentDir returns whether the given path specifies
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func SpecifiesCurrentDir(path string) bool {
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its
// parent directory and its basename in that directory.
func SplitPathDirEntry(localizedPath string) (dir, base string) {
normalizedPath := filepath.ToSlash(localizedPath)
vol := filepath.VolumeName(normalizedPath)
normalizedPath = normalizedPath[len(vol):]
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(normalizePath(path))
if normalizedPath == "/" {
// Specifies the root path.
return filepath.FromSlash(vol + normalizedPath), "."
if specifiesCurrentDir(path) {
cleanedPath += string(filepath.Separator) + "."
}
trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
dir = filepath.FromSlash(path.Dir(trimmedPath))
base = filepath.FromSlash(path.Base(trimmedPath))
return dir, base
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
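// Sketch of the split behavior (Unix paths assumed, fmt imported):
func exampleSplitPathDirEntry() {
    dir, base := SplitPathDirEntry("/usr/local") // "/usr", "local"
    fmt.Println(dir, base)
    dir, base = SplitPathDirEntry("/usr/local/.") // "/usr/local", "."
    fmt.Println(dir, base)
}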
// TarResource archives the resource at the given sourcePath into a Tar
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourcePath string) (content Archive, err error) {
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
@@ -99,22 +103,6 @@ func TarResource(sourcePath string) (content Archive, err error) {
return
}
if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
// In the case where the source path is a symbolic link AND it ends
// with a path separator, we will want to evaluate the symbolic link.
trimmedPath := sourcePath[:len(sourcePath)-1]
stat, err := os.Lstat(trimmedPath)
if err != nil {
return nil, err
}
if stat.Mode()&os.ModeSymlink != 0 {
if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
return nil, err
}
}
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
@@ -127,39 +115,137 @@ func TarResource(sourcePath string) (content Archive, err error) {
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
})
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoStatPath stats the given path to create a CopyInfo
// struct representing that resource. If mustExist is true, then
// it is an error if there is no file or directory at the given path.
func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
pathInfo := CopyInfo{Path: path}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbolic link;
// we will use the target file instead of the symbolic link if
// followLink is set
path = normalizePath(path)
fileInfo, err := os.Lstat(path)
if err == nil {
pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
} else if os.IsNotExist(err) && !mustExist {
err = nil
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
return pathInfo, err
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Lstat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
@@ -180,7 +266,10 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
@@ -188,8 +277,11 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case AssertsDirectory(dstInfo.Path):
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
@@ -202,14 +294,24 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// rebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurance of oldBase with newBase at the beginning of entry names.
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
@@ -249,21 +351,25 @@ func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Arc
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string) error {
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcPath)
content, err := TarResource(srcInfo)
if err != nil {
return err
}
@@ -274,25 +380,14 @@ func CopyResource(srcPath, dstPath string) error {
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
dstInfo, err := CopyInfoStatPath(dstPath, false)
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
if !dstInfo.Exists {
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(dstPath)
dstStat, err := os.Lstat(dstParent)
if err != nil {
return err
}
if !dstStat.IsDir() {
return ErrNotDirectory
}
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
@@ -306,3 +401,58 @@ func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides which real path needs to be copied, given parameters
// such as whether to follow symbolic links. If followLink is true, resolvedPath will be
// the link target of any symbolic link file; otherwise only symlinks in the parent
// directory are resolved, and a symbolic link file itself is returned without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not following symbolic links, then resolve the symbolic link of the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, and
// returns the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separators or dot) so
// we can manually join it with them
var rebaseName string
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
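// A sketch with a hypothetical symlink: if /tmp/link points at /tmp/target,
// the rebase name records the originally requested base name.
func exampleResolveSource() {
    resolved, rebase, err := ResolveHostSourcePath("/tmp/link", true)
    if err == nil {
        // resolved == "/tmp/target"; rebase == "link", so entries in the
        // copied archive are renamed back to the requested name.
        fmt.Println(resolved, rebase)
    }
}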


@@ -9,23 +9,41 @@ import (
"path/filepath"
"runtime"
"strings"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return 0, err
}
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
if options == nil {
options = &TarOptions{}
}
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
@@ -55,7 +73,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertantly.
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
@@ -80,11 +98,11 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
@@ -97,7 +115,10 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
return 0, err
}
}
continue
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
@@ -111,11 +132,38 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
}
base := filepath.Base(path)
if strings.HasPrefix(base, ".wh.") {
originalBase := base[len(".wh."):]
originalPath := filepath.Join(filepath.Dir(path), originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If path exists we almost always just want to remove and replace it.
@@ -136,7 +184,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
@@ -150,6 +198,27 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
srcData = tmpFile
}
// if the options contain uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if srcHdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
if err != nil {
return 0, err
}
srcHdr.Uid = xUID
}
if srcHdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
if err != nil {
return 0, err
}
srcHdr.Gid = xGID
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
return 0, err
}
@@ -159,13 +228,13 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
@@ -177,20 +246,20 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
return applyLayerHandler(dest, layer, true)
func ApplyLayer(dest string, layer Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
return applyLayerHandler(dest, layer, false)
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
@@ -206,5 +275,5 @@ func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64
return 0, err
}
}
return UnpackLayer(dest, layer)
return UnpackLayer(dest, layer, options)
}


@@ -0,0 +1,23 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename, this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
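// As a quick illustration (a sketch, not part of the vendored file; assumes
// path/filepath is imported): deleting /foo/bar in an upper layer is
// recorded as a whiteout entry named with WhiteoutPrefix.
func exampleWhiteoutName(deleted string) string {
    dir, base := filepath.Split(deleted)
    return filepath.Join(dir, WhiteoutPrefix+base) // "/foo/bar" -> "/foo/.wh.bar"
}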


@@ -4,10 +4,11 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)
@@ -77,7 +78,7 @@ func Matches(file string, patterns []string) (bool, error) {
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doen't need to do as much error checking and clean-up. This was done to avoid
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
@@ -93,15 +94,15 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
pattern = pattern[1:]
}
match, err := filepath.Match(pattern, file)
match, err := regexpMatch(pattern, file)
if err != nil {
return false, err
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
}
}
@@ -118,6 +119,99 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
return matched, nil
}
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(pattern, path); err != nil {
return false, err
}
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
if sl == `\` {
escSL += `\`
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
}
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if strings.Index(".$", string(ch)) != -1 {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape next char. Note that a trailing \ in the pattern
// will be left alone (but need to escape it)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
} else {
regStr += `\`
}
} else {
regStr += string(ch)
}
}
regStr += "$"
res, err := regexp.MatchString(regStr, path)
// Map regexp's error to filepath's so no one knows we're not using filepath
if err != nil {
err = filepath.ErrBadPattern
}
return res, err
}
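// A sketch contrasting the two matchers (Unix separators assumed, fmt
// imported): "**" may cross directory separators, a single "*" may not.
func exampleRegexpMatch() {
    m1, _ := regexpMatch("docs/**/*.md", "docs/a/b/c.md")    // true
    m2, _ := filepath.Match("docs/**/*.md", "docs/a/b/c.md") // false
    fmt.Println(m1, m2)
}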
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
@@ -143,17 +237,6 @@ func CopyFile(src, dst string) (int64, error) {
return io.Copy(df, sf)
}
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {


@@ -0,0 +1,22 @@
// +build linux freebsd
package fileutils
import (
"fmt"
"io/ioutil"
"os"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}


@@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
return -1
}


@@ -0,0 +1,195 @@
package idtools
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName string = "/etc/subuid"
subgidFileName string = "/etc/subgid"
)
// MkdirAllAs creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
// MkdirAllNewAs creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
var uid, gid int
if uidMap != nil {
xUID, err := ToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
uid = xUID
}
if gidMap != nil {
xGID, err := ToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
gid = xGID
}
return uid, gid, nil
}
// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func ToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
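// A worked sketch of the mapping arithmetic (hypothetical range, fmt
// assumed imported): one range mapping container IDs 0..65535 onto host
// IDs 100000..165535.
func exampleRemap() {
    m := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    hostID, _ := ToHost(1000, m)        // 100000 + (1000 - 0) = 101000
    contID, _ := ToContainer(101000, m) // 0 + (101000 - 100000) = 1000
    fmt.Println(hostID, contID)
}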
// CreateIDMappings takes a requested user and group name and,
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
subuidRanges, err := parseSubuid(username)
if err != nil {
return nil, nil, err
}
subgidRanges, err := parseSubgid(groupname)
if err != nil {
return nil, nil, err
}
if len(subuidRanges) == 0 {
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
}
if len(subgidRanges) == 0 {
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
}
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
// sort the ranges by lowest ID first
sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
func parseSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
if err := s.Err(); err != nil {
return rangeList, err
}
text := strings.TrimSpace(s.Text())
if text == "" {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username {
// return the first entry for a user; ignores potential for multiple ranges per user
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, nil
}
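// Tying the pieces together, a hedged sketch of the intended flow
// (hypothetical user name and path):
func exampleSetup() error {
    uidMap, gidMap, err := CreateIDMappings("dockremap", "dockremap")
    if err != nil {
        return err
    }
    rootUID, rootGID, err := GetRootUIDGID(uidMap, gidMap)
    if err != nil {
        return err
    }
    // Create a directory owned by the remapped root user.
    return MkdirAllAs("/var/lib/example", 0700, rootUID, rootGID)
}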


@@ -0,0 +1,60 @@
// +build !windows
package idtools
import (
"os"
"path/filepath"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil && chownExisting {
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
return err
}
// short-circuit--we were called with an existing directory and chown was requested
return nil
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,18 @@
// +build windows
package idtools
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
return nil
}


@@ -0,0 +1,155 @@
package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
"syscall"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
// addgroup --gid <id> <groupname>
// groupadd -g <id> <groupname>
const baseUID int = 10000
const baseGID int = 10000
const idMAX int = 65534
var (
userCommand string
groupCommand string
cmdTemplates = map[string]string{
"adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
"useradd": "-M -u %d -s /bin/false -N -g %s %s",
"addgroup": "--gid %d %s",
"groupadd": "-g %d %s",
}
)
func init() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
if _, err := resolveBinary("addgroup"); err == nil {
groupCommand = "addgroup"
} else if _, err := resolveBinary("groupadd"); err == nil {
groupCommand = "groupadd"
}
}
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
// This new user's /etc/sub{uid,gid} ranges will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
// Find unused uid, gid pair
uid, err := findUnusedUID(baseUID)
if err != nil {
return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err)
}
gid, err := findUnusedGID(baseGID)
if err != nil {
return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err)
}
// First add the group that we will use
if err := addGroup(name, gid); err != nil {
return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err)
}
// Add the user as a member of the group
if err := addUser(name, uid, name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
return uid, gid, nil
}
func addUser(userName string, uid int, groupName string) error {
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName)
return execAddCmd(userCommand, args)
}
func addGroup(groupName string, gid int) error {
if groupCommand == "" {
return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found")
}
args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName)
// only error out if the error isn't that the group already exists
// if the group exists then our needs are already met
if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") {
return err
}
return nil
}
func execAddCmd(cmd, args string) error {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
out, err := execCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
}
return nil
}
func findUnusedUID(startUID int) (int, error) {
return findUnused("passwd", startUID)
}
func findUnusedGID(startGID int) (int, error) {
return findUnused("group", startGID)
}
func findUnused(file string, id int) (int, error) {
for {
cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
cmd := exec.Command("sh", "-c", cmdStr)
if err := cmd.Run(); err != nil {
// if a non-zero return code occurs, then we know the ID was not found
// and is usable
if exiterr, ok := err.(*exec.ExitError); ok {
// The program has exited with an exit code != 0
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
if status.ExitStatus() == 1 {
//no match, we can use this ID
return id, nil
}
}
}
return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
}
id++
if id > idMAX {
return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
}
}
}


@@ -0,0 +1,12 @@
// +build !linux
package idtools
import "fmt"
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}


@@ -0,0 +1,152 @@
package ioutils
import (
"errors"
"io"
"sync"
)
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
// ErrClosed is returned when Write is called on a closed BytesPipe.
var ErrClosed = errors.New("write to closed BytesPipe")
// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf [][]byte // slice of byte-slices of buffered data
lastRead int // index in the first slice to a read point
bufLen int // length of data buffered over the slices
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates a new BytesPipe, initialized with the specified slice.
// If buf is nil, then it will be initialized with a slice whose capacity is 64.
// buf will be adjusted so that len(buf) == 0 while its capacity is preserved.
func NewBytesPipe(buf []byte) *BytesPipe {
if cap(buf) == 0 {
buf = make([]byte, 0, 64)
}
bp := &BytesPipe{
buf: [][]byte{buf[:0]},
}
bp.wait = sync.NewCond(&bp.mu)
return bp
}
// Write writes p to BytesPipe.
// It can allocate new []byte slices in the process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
defer bp.mu.Unlock()
written := 0
for {
if bp.closeErr != nil {
return written, ErrClosed
}
// write data to the last buffer
b := bp.buf[len(bp.buf)-1]
// copy data to the current empty allocated area
n := copy(b[len(b):cap(b)], p)
// increment buffered data length
bp.bufLen += n
// include written data in last buffer
bp.buf[len(bp.buf)-1] = b[:len(b)+n]
written += n
// if there was enough room to write all then break
if len(p) == n {
break
}
// more data: write to the next slice
p = p[n:]
// block if too much data is still in the buffer
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
}
// allocate slice that has twice the size of the last unless maximum reached
nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
if nextCap > maxCap {
nextCap = maxCap
}
// add new byte slice to the buffers slice and continue writing
bp.buf = append(bp.buf, make([]byte, 0, nextCap))
}
bp.wait.Broadcast()
return written, nil
}
// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
bp.mu.Lock()
if err != nil {
bp.closeErr = err
} else {
bp.closeErr = io.EOF
}
bp.wait.Broadcast()
bp.mu.Unlock()
return nil
}
// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
func (bp *BytesPipe) len() int {
return bp.bufLen - bp.lastRead
}
// Read reads bytes from BytesPipe.
// Data can be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
defer bp.mu.Unlock()
if bp.len() == 0 {
if bp.closeErr != nil {
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.len() == 0 && bp.closeErr != nil {
return 0, bp.closeErr
}
}
for {
read := copy(p, bp.buf[0][bp.lastRead:])
n += read
bp.lastRead += read
if bp.len() == 0 {
// we have read everything. reset to the beginning.
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = bp.buf[0][:0]
break
}
// break if everything was read
if len(p) == read {
break
}
// more buffered data and more asked. read from next slice.
p = p[read:]
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = nil // throw away old slice
bp.buf = bp.buf[1:] // switch to next
}
bp.wait.Broadcast()
return
}
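// A usage sketch (assuming io/ioutil is imported): one goroutine writes and
// closes, another drains until Close surfaces io.EOF.
func exampleBytesPipe() {
    bp := NewBytesPipe(nil)
    go func() {
        bp.Write([]byte("hello"))
        bp.Close() // a Read after draining now returns io.EOF
    }()
    data, _ := ioutil.ReadAll(bp)
    fmt.Println(string(data)) // "hello"
}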


@@ -12,3 +12,11 @@ func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
}
return 0, nil
}
// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
if ok {
return fmt.Fprintf(w, format, ok)
}
return 0, nil
}
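// Sketch: the write happens only when the flag is set.
func exampleFprintfIfTrue(w io.Writer, privileged bool) {
    // With privileged == true this writes "privileged=true\n";
    // with false it writes nothing and returns (0, nil).
    FprintfIfTrue(w, "privileged=%v\n", privileged)
}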


@@ -53,7 +53,7 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
}
if rdrOffset == s && i != len(r.readers)-1 {
idx += 1
idx++
rdrOffset = 0
}
r.pos = &pos{idx, rdrOffset}


@@ -1,14 +1,11 @@
package ioutils
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"math/big"
"sync"
"time"
"github.com/fsouza/go-dockerclient/external/golang.org/x/net/context"
)
type readCloserWrapper struct {
@@ -20,6 +17,7 @@ func (r *readCloserWrapper) Close() error {
return r.closer()
}
// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
return &readCloserWrapper{
Reader: r,
@@ -40,6 +38,7 @@ func (r *readerErrWrapper) Read(p []byte) (int, error) {
return n, err
}
// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
return &readerErrWrapper{
reader: r,
@@ -47,178 +46,7 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
}
}
// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
drainBuf []byte
reuseBuf []byte
maxReuse int64
resetTimeout time.Duration
bufLenResetThreshold int64
maxReadDataReset int64
}
func NewBufReader(r io.Reader) *bufReader {
var timeout int
if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
timeout = int(randVal.Int64()) + 180
} else {
timeout = 300
}
reader := &bufReader{
buf: &bytes.Buffer{},
drainBuf: make([]byte, 1024),
reuseBuf: make([]byte, 4096),
maxReuse: 1000,
resetTimeout: time.Second * time.Duration(timeout),
bufLenResetThreshold: 100 * 1024,
maxReadDataReset: 10 * 1024 * 1024,
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
reader := &bufReader{
buf: buffer,
drainBuf: drainBuffer,
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func (r *bufReader) drain() {
var (
duration time.Duration
lastReset time.Time
now time.Time
reset bool
bufLen int64
dataSinceReset int64
maxBufLen int64
reuseBufLen int64
reuseCount int64
)
reuseBufLen = int64(len(r.reuseBuf))
lastReset = time.Now()
for {
n, err := r.reader.Read(r.drainBuf)
dataSinceReset += int64(n)
r.Lock()
bufLen = int64(r.buf.Len())
if bufLen > maxBufLen {
maxBufLen = bufLen
}
// Avoid unbounded growth of the buffer over time.
// This has been discovered to be the only non-intrusive
// solution to the unbounded growth of the buffer.
// Alternative solutions such as compression, multiple
// buffers, channels and other similar pieces of code
// were reducing throughput, overall Docker performance
// or simply crashed Docker.
// This solution releases the buffer when specific
// conditions are met to avoid the continuous resizing
// of the buffer for long lived containers.
//
// Move data to the front of the buffer if it's
// smaller than what reuseBuf can store
if bufLen > 0 && reuseBufLen >= bufLen {
n, _ := r.buf.Read(r.reuseBuf)
r.buf.Write(r.reuseBuf[0:n])
// Take action if the buffer has been reused too many
// times and if there's data in the buffer.
// The timeout is also used as means to avoid doing
// these operations more often or less often than
// required.
// The various conditions try to detect heavy activity
// in the buffer which might be indicators of heavy
// growth of the buffer.
} else if reuseCount >= r.maxReuse && bufLen > 0 {
now = time.Now()
duration = now.Sub(lastReset)
timeoutReached := duration >= r.resetTimeout
// The timeout has been reached and the
// buffered data couldn't be moved to the front
// of the buffer, so the buffer gets reset.
if timeoutReached && bufLen > reuseBufLen {
reset = true
}
// The amount of buffered data is too high now,
// reset the buffer.
if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
reset = true
}
// Reset the buffer if a certain amount of
// data has gone through the buffer since the
// last reset.
if timeoutReached && dataSinceReset >= r.maxReadDataReset {
reset = true
}
// The buffered data is moved to a fresh buffer,
// swap the old buffer with the new one and
// reset all counters.
if reset {
newbuf := &bytes.Buffer{}
newbuf.ReadFrom(r.buf)
r.buf = newbuf
lastReset = now
reset = false
dataSinceReset = 0
maxBufLen = 0
reuseCount = 0
}
}
if err != nil {
r.err = err
} else {
r.buf.Write(r.drainBuf[0:n])
}
reuseCount++
r.wait.Signal()
r.Unlock()
callSchedulerIfNecessary()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.Lock()
defer r.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
@@ -227,6 +55,8 @@ func HashData(src io.Reader) (string, error) {
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// OnEOFReader wraps an io.ReadCloser with a function that runs
// when the reader reaches end-of-file or is closed.
type OnEOFReader struct {
Rc io.ReadCloser
Fn func()
@ -240,6 +70,7 @@ func (r *OnEOFReader) Read(p []byte) (n int, err error) {
return
}
// Close closes the file and runs the function.
func (r *OnEOFReader) Close() error {
err := r.Rc.Close()
r.runFunc()
@ -252,3 +83,72 @@ func (r *OnEOFReader) runFunc() {
r.Fn = nil
}
}
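For orientation, a minimal usage sketch (not part of this diff) of the OnEOFReader vendored above; the payload string and the cleanup message are illustrative:
```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	r := &ioutils.OnEOFReader{
		Rc: ioutil.NopCloser(strings.NewReader("payload")),
		Fn: func() { fmt.Println("stream finished") },
	}
	io.Copy(ioutil.Discard, r) // Fn fires once Read returns io.EOF
	r.Close()                  // Fn was cleared after the first run, so it does not fire again
}
```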
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
cancel func()
pR *io.PipeReader // Stream to read from
pW *io.PipeWriter
}
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
pR, pW := io.Pipe()
// Create a context used to signal when the pipe is closed
doneCtx, cancel := context.WithCancel(context.Background())
p := &cancelReadCloser{
cancel: cancel,
pR: pR,
pW: pW,
}
go func() {
_, err := io.Copy(pW, in)
select {
case <-ctx.Done():
// If the context was closed, p.closeWithError
// was already called. Calling it again would
// change the error that Read returns.
default:
p.closeWithError(err)
}
in.Close()
}()
go func() {
for {
select {
case <-ctx.Done():
p.closeWithError(ctx.Err())
case <-doneCtx.Done():
return
}
}
}()
return p
}
// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
return p.pR.Read(buf)
}
// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
p.pW.CloseWithError(err)
p.cancel()
}
// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
p.closeWithError(io.EOF)
return nil
}
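The goroutine pair above is easiest to see in a hedged sketch: a never-ending reader stands in for a container attach or log-follow stream, and a deadline context tears the read down. The slowReader type is invented for the example, and the golang.org/x/net/context import path is an assumption about what this vendored revision uses:
```go
package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
	"golang.org/x/net/context"
)

// slowReader stands in for a long-running stream; it trickles one byte
// at a time, forever.
type slowReader struct{}

func (slowReader) Read(p []byte) (int, error) {
	time.Sleep(10 * time.Millisecond)
	p[0] = 'x'
	return 1, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, ioutil.NopCloser(slowReader{}))
	defer rc.Close()

	buf := make([]byte, 64)
	for {
		if _, err := rc.Read(buf); err != nil {
			fmt.Println("read stopped:", err) // context deadline exceeded
			return
		}
	}
}
```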

View File

@ -0,0 +1,10 @@
// +build !windows
package ioutils
import "io/ioutil"
// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
return ioutil.TempDir(dir, prefix)
}

View File

@ -0,0 +1,18 @@
// +build windows
package ioutils
import (
"io/ioutil"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
tempDir, err := ioutil.TempDir(dir, prefix)
if err != nil {
return "", err
}
return longpath.AddPrefix(tempDir), nil
}

View File

@ -1,41 +1,86 @@
package ioutils
import (
"errors"
"io"
"net/http"
"sync"
)
// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
sync.Mutex
mu sync.Mutex
w io.Writer
flusher http.Flusher
flushed bool
closed error
// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
// channel will allow one to properly order the operations.
}
var errWriteFlusherClosed = errors.New("writeflusher: closed")
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.Lock()
defer wf.Unlock()
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return 0, wf.closed
}
n, err = wf.w.Write(b)
wf.flushed = true
wf.flusher.Flush()
wf.flush() // every write is a flush.
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.Lock()
defer wf.Unlock()
wf.mu.Lock()
defer wf.mu.Unlock()
wf.flush()
}
// flush the stream immediately without taking a lock. Used internally.
func (wf *WriteFlusher) flush() {
if wf.closed != nil {
return
}
wf.flushed = true
wf.flusher.Flush()
}
// Flushed returns the state of flushed.
// If it's flushed, it returns true; otherwise it returns false.
func (wf *WriteFlusher) Flushed() bool {
wf.Lock()
defer wf.Unlock()
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
// be used to detect whether or not a response code has been issued.
// Another hook should be used instead.
wf.mu.Lock()
defer wf.mu.Unlock()
return wf.flushed
}
// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return wf.closed
}
wf.closed = errWriteFlusherClosed
return nil
}
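Since every Write is a flush and writes after Close fail, a small sketch can illustrate the contract. It assumes the truncated NewWriteFlusher constructor wires the writer and flusher as its signature suggests; httptest.NewRecorder stands in for a streaming HTTP response because it implements http.Flusher:
```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	rec := httptest.NewRecorder() // *ResponseRecorder implements http.Flusher
	wf := ioutils.NewWriteFlusher(rec)

	wf.Write([]byte("progress: 10%\n")) // every write flushes the stream
	fmt.Println(rec.Flushed)            // true

	wf.Close()
	if _, err := wf.Write([]byte("late write")); err != nil {
		fmt.Println(err) // writeflusher: closed
	}
}
```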
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {

View File

@ -2,6 +2,7 @@ package ioutils
import "io"
// NopWriter represents a type whose write operation is a nop.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
@ -14,12 +15,15 @@ type nopWriteCloser struct {
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher represents a type whose flush operation is a nop.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
@ -31,6 +35,7 @@ func (r *writeCloserWrapper) Close() error {
return r.closer()
}
// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
@ -38,7 +43,7 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
}
}
// Wrap a concrete io.Writer and hold a count of the number
// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return value is masked
// (e.g., json.Encoder.Encode())
@ -47,6 +52,7 @@ type WriteCounter struct {
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,

View File

@ -0,0 +1,26 @@
// longpath introduces some constants and helper functions for handling long paths
// in Windows, which are expected to be prepended with `\\?\` and followed by either
// a drive letter, a UNC server\share, or a volume identifier.
package longpath
import (
"strings"
)
// Prefix is the longpath prefix for Windows file paths.
const Prefix = `\\?\`
// AddPrefix will add the Windows long path prefix to the path provided if
// it does not already have it.
func AddPrefix(path string) string {
if !strings.HasPrefix(path, Prefix) {
if strings.HasPrefix(path, `\\`) {
// This is a UNC path, so we need to add 'UNC' to the path as well.
path = Prefix + `UNC` + path[1:]
} else {
path = Prefix + path
}
}
return path
}
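A quick sketch of what AddPrefix produces for the three cases the function distinguishes (plain drive-letter path, UNC path, already-prefixed path); the paths are illustrative:
```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\Temp\docker`))  // \\?\C:\Temp\docker
	fmt.Println(longpath.AddPrefix(`\\server\share`))  // \\?\UNC\server\share
	fmt.Println(longpath.AddPrefix(`\\?\C:\prefixed`)) // unchanged
}
```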

View File

@ -1,27 +0,0 @@
Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,40 +0,0 @@
Package mflag (aka multiple-flag) implements command-line flag parsing.
It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/)
It adds:
* both short and long flag version
`./example -s red` `./example --string blue`
* multiple names for the same option
```
$>./example -h
Usage of example:
-s, --string="": a simple string
```
___
It is very flexible on purpose, so you can do things like:
```
$>./example -h
Usage of example:
-s, -string, --string="": a simple string
```
Or:
```
$>./example -h
Usage of example:
-oldflag, --newflag="": a simple string
```
You can also hide some flags from the usage, so if we want only `--newflag`:
```
$>./example -h
Usage of example:
--newflag="": a simple string
$>./example -oldflag str
str
```
See [example.go](example/example.go) for more details.

View File

@ -1,187 +0,0 @@
// Package parsers provides helper functions to parse and validate different types
// of strings. These can be hosts, unix addresses, tcp addresses, filters, or kernel
// operating system versions.
package parsers
import (
"fmt"
"net/url"
"path"
"runtime"
"strconv"
"strings"
)
// ParseHost parses the specified address and returns an address that will be used as the host.
// Depending on the address specified, it will use the defaultTCPAddr or defaultUnixAddr
// FIXME: Change this not to receive default value as parameter
func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
addr = strings.TrimSpace(addr)
if addr == "" {
if runtime.GOOS != "windows" {
addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
} else {
// Note - defaultTCPAddr already includes tcp:// prefix
addr = defaultTCPAddr
}
}
addrParts := strings.Split(addr, "://")
if len(addrParts) == 1 {
addrParts = []string{"tcp", addrParts[0]}
}
switch addrParts[0] {
case "tcp":
return ParseTCPAddr(addrParts[1], defaultTCPAddr)
case "unix":
return ParseUnixAddr(addrParts[1], defaultUnixAddr)
case "fd":
return addr, nil
default:
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
}
// ParseUnixAddr parses and validates that the specified address is a valid UNIX
// socket address. It returns a formatted UNIX socket address, either using the
// address parsed from addr, or the contents of defaultAddr if addr is a blank
// string.
func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, "unix://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("unix://%s", addr), nil
}
// ParseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from addr, or the contents of defaultAddr if addr is a blank string.
func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, "tcp://")
if strings.Contains(addr, "://") || addr == "" {
return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
}
u, err := url.Parse("tcp://" + addr)
if err != nil {
return "", err
}
hostParts := strings.Split(u.Host, ":")
if len(hostParts) != 2 {
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
host := hostParts[0]
if host == "" {
host = defaultAddr
}
p, err := strconv.Atoi(hostParts[1])
if err != nil && p == 0 {
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
}
// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
func ParseRepositoryTag(repos string) (string, string) {
n := strings.Index(repos, "@")
if n >= 0 {
parts := strings.Split(repos, "@")
return parts[0], parts[1]
}
n = strings.LastIndex(repos, ":")
if n < 0 {
return repos, ""
}
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
return repos[:n], tag
}
return repos, ""
}
// PartParser parses and validates the specified string (data) using the specified template
// e.g. ip:public:private -> 192.168.0.1:80:8000
func PartParser(template, data string) (map[string]string, error) {
// ip:public:private
var (
templateParts = strings.Split(template, ":")
parts = strings.Split(data, ":")
out = make(map[string]string, len(templateParts))
)
if len(parts) != len(templateParts) {
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
}
for i, t := range templateParts {
value := ""
if len(parts) > i {
value = parts[i]
}
out[t] = value
}
return out, nil
}
// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
func ParseKeyValueOpt(opt string) (string, string, error) {
parts := strings.SplitN(opt, "=", 2)
if len(parts) != 2 {
return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
}
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}
// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
func ParsePortRange(ports string) (uint64, uint64, error) {
if ports == "" {
return 0, 0, fmt.Errorf("Empty string specified for ports.")
}
if !strings.Contains(ports, "-") {
start, err := strconv.ParseUint(ports, 10, 16)
end := start
return start, end, err
}
parts := strings.Split(ports, "-")
start, err := strconv.ParseUint(parts[0], 10, 16)
if err != nil {
return 0, 0, err
}
end, err := strconv.ParseUint(parts[1], 10, 16)
if err != nil {
return 0, 0, err
}
if end < start {
return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
}
return start, end, nil
}
// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
if val == "" {
return "", "", fmt.Errorf("empty string specified for links")
}
arr := strings.Split(val, ":")
if len(arr) > 2 {
return "", "", fmt.Errorf("bad format for links: %s", val)
}
if len(arr) == 1 {
return val, val, nil
}
// This is kept because we can actually get a HostConfig with links
// from an already created container and the format is not `foo:bar`
// but `/foo:/c1/bar`
if strings.HasPrefix(arr[0], "/") {
_, alias := path.Split(arr[1])
return arr[0][1:], alias, nil
}
return arr[0], arr[1], nil
}
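Since this diff deletes the parsers package, a hedged sketch against the old API may help show what is going away; note how ParseRepositoryTag refuses to treat a registry port as a tag:
```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	// The last colon only separates a tag when no "/" follows it, so a
	// registry port is never mistaken for a tag.
	repo, tag := parsers.ParseRepositoryTag("localhost:5000/samalba/hipache:latest")
	fmt.Println(repo, tag) // localhost:5000/samalba/hipache latest

	repo, tag = parsers.ParseRepositoryTag("localhost:5000/foo/bar")
	fmt.Printf("%q %q\n", repo, tag) // "localhost:5000/foo/bar" ""

	start, end, _ := parsers.ParsePortRange("8000-9000")
	fmt.Println(start, end) // 8000 9000

	name, alias, _ := parsers.ParseLink("db:database")
	fmt.Println(name, alias) // db database
}
```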

View File

@ -9,19 +9,26 @@ import (
)
const (
StdWriterPrefixLen = 8
StdWriterFdIndex = 0
StdWriterSizeIndex = 4
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
type StdType [StdWriterPrefixLen]byte
// StdType prefixes type and length to standard stream.
type StdType [stdWriterPrefixLen]byte
var (
Stdin StdType = StdType{0: 0}
Stdout StdType = StdType{0: 1}
Stderr StdType = StdType{0: 2}
// Stdin represents standard input stream type.
Stdin = StdType{0: 0}
// Stdout represents standard output stream type.
Stdout = StdType{0: 1}
// Stderr represents standard error stream type.
Stderr = StdType{0: 2}
)
// StdWriter is a wrapper of io.Writer with extra customized info.
type StdWriter struct {
io.Writer
prefix StdType
@ -36,10 +43,10 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) {
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
n1, err = w.Writer.Write(w.prefix[:])
if err != nil {
n = n1 - StdWriterPrefixLen
n = n1 - stdWriterPrefixLen
} else {
n2, err = w.Writer.Write(buf)
n = n1 + n2 - StdWriterPrefixLen
n = n1 + n2 - stdWriterPrefixLen
}
if n < 0 {
n = 0
@ -61,7 +68,7 @@ func NewStdWriter(w io.Writer, t StdType) *StdWriter {
}
}
var ErrInvalidStdHeader = errors.New("Unrecognized input header")
var errInvalidStdHeader = errors.New("Unrecognized input header")
// StdCopy is a modified version of io.Copy.
//
@ -75,7 +82,7 @@ var ErrInvalidStdHeader = errors.New("Unrecognized input header")
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
@ -85,12 +92,12 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
for {
// Make sure we have at least a full header
for nr < StdWriterPrefixLen {
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < StdWriterPrefixLen {
if nr < stdWriterPrefixLen {
logrus.Debugf("Corrupted prefix: %v", buf[:nr])
return written, nil
}
@ -103,7 +110,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Check the first byte to know where to write
switch buf[StdWriterFdIndex] {
switch buf[stdWriterFdIndex] {
case 0:
fallthrough
case 1:
@ -113,30 +120,30 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
// Write on stderr
out = dsterr
default:
logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
return 0, ErrInvalidStdHeader
logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
return 0, errInvalidStdHeader
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
logrus.Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+StdWriterPrefixLen > bufLen {
logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
if frameSize+stdWriterPrefixLen > bufLen {
logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the number of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+StdWriterPrefixLen {
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+StdWriterPrefixLen {
logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
if nr < frameSize+stdWriterPrefixLen {
logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
return written, nil
}
break
@ -148,7 +155,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
logrus.Debugf("Error writing frame: %s", ew)
return 0, ew
@ -161,8 +168,8 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+StdWriterPrefixLen:])
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + StdWriterPrefixLen
nr -= frameSize + stdWriterPrefixLen
}
}
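The framing protocol above (stream fd byte at offset 0, big-endian payload length at offset 4) round-trips cleanly; a minimal sketch that multiplexes two streams with NewStdWriter and splits them again with StdCopy:
```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two streams into one buffer using the 8-byte frame header.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// Demultiplex the combined stream back onto two writers.
	var out, errOut bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errOut, &muxed); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(out.String())    // to stdout
	fmt.Print(errOut.String()) // to stderr
}
```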

View File

@ -0,0 +1,47 @@
package system
import (
"os"
"syscall"
"time"
"unsafe"
)
var (
maxTime time.Time
)
func init() {
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
// This is a 64 bit timespec
// os.Chtimes limits time to the following
maxTime = time.Unix(0, 1<<63-1)
} else {
// This is a 32 bit timespec
maxTime = time.Unix(1<<31-1, 0)
}
}
// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
unixMinTime := time.Unix(0, 0)
unixMaxTime := maxTime
// If the modified time is prior to the Unix Epoch, or after the
// end of Unix Time, os.Chtimes has undefined behavior;
// default to the Unix Epoch in this case, just in case.
if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
atime = unixMinTime
}
if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
mtime = unixMinTime
}
if err := os.Chtimes(name, atime, mtime); err != nil {
return err
}
return nil
}
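A short sketch of the clamping behavior: a pre-epoch timestamp is silently replaced with the Unix epoch rather than handed to os.Chtimes; the temp-file name and the 1601 date are illustrative:
```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	f, _ := ioutil.TempFile("", "chtimes")
	defer os.Remove(f.Name())

	// A pre-epoch mtime is clamped to the Unix epoch instead of being
	// passed through to os.Chtimes.
	ancient := time.Date(1601, 1, 1, 0, 0, 0, 0, time.UTC)
	if err := system.Chtimes(f.Name(), ancient, ancient); err != nil {
		fmt.Println(err)
		return
	}
	fi, _ := os.Stat(f.Name())
	fmt.Println(fi.ModTime().UTC()) // 1970-01-01 00:00:00 +0000 UTC
}
```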

View File

@ -0,0 +1,14 @@
// +build !windows
package system
import (
"time"
)
// setCTime will set the create time on a file. On Unix, the create
// time is updated as a side effect of setting the modified time, so
// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}

View File

@ -0,0 +1,27 @@
// +build windows
package system
import (
"syscall"
"time"
)
// setCTime will set the create time on a file. On Windows, this requires
// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
pathp, e := syscall.UTF16PtrFromString(path)
if e != nil {
return e
}
h, e := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return e
}
defer syscall.Close(h)
c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
return syscall.SetFileTime(h, &c, nil, nil)
}

View File

@ -5,5 +5,6 @@ import (
)
var (
// ErrNotSupportedPlatform means the platform is not supported.
ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
)

View File

@ -8,11 +8,6 @@ import (
"unsafe"
)
const (
EVENT_ALL_ACCESS = 0x1F0003
EVENT_MODIFY_STATUS = 0x0002
)
var (
procCreateEvent = modkernel32.NewProc("CreateEventW")
procOpenEvent = modkernel32.NewProc("OpenEventW")
@ -21,13 +16,14 @@ var (
procPulseEvent = modkernel32.NewProc("PulseEvent")
)
// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
var _p1 uint32 = 0
var _p1 uint32
if manualReset {
_p1 = 1
}
var _p2 uint32 = 0
var _p2 uint32
if initialState {
_p2 = 1
}
@ -40,9 +36,10 @@ func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool,
return
}
// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
var _p1 uint32 = 0
var _p1 uint32
if inheritHandle {
_p1 = 1
}
@ -55,14 +52,17 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle sy
return
}
// SetEvent implements win32 SetEvent func in golang.
func SetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procSetEvent)
}
// ResetEvent implements win32 ResetEvent func in golang.
func ResetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procResetEvent)
}
// PulseEvent implements win32 PulseEvent func in golang.
func PulseEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procPulseEvent)
}

View File

@ -4,8 +4,16 @@ package system
import (
"os"
"path/filepath"
)
// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all directories created.
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
return filepath.IsAbs(path)
}

View File

@ -4,7 +4,9 @@ package system
import (
"os"
"path/filepath"
"regexp"
"strings"
"syscall"
)
@ -62,3 +64,19 @@ func MkdirAll(path string, perm os.FileMode) error {
}
return nil
}
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
if !filepath.IsAbs(path) {
if !strings.HasPrefix(path, string(os.PathSeparator)) {
return false
}
}
return true
}
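A hedged Windows-only sketch contrasting the two predicates on a rooted but drive-less path; the paths are illustrative:
```go
// +build windows

package main

import (
	"fmt"
	"path/filepath"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	fmt.Println(filepath.IsAbs(`\windows\system32`)) // false: no drive letter
	fmt.Println(system.IsAbs(`\windows\system32`))   // true: rooted is enough here
	fmt.Println(system.IsAbs(`relative\path`))       // false either way
}
```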

View File

@ -7,10 +7,10 @@ import (
)
// Lstat takes a path to a file and returns
// a system.Stat_t type pertaining to that file.
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Lstat(path string) (*Stat_t, error) {
func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, err

View File

@ -6,21 +6,17 @@ import (
"os"
)
// Some explanation for my own sanity, and hopefully maintainers in the
// future.
//
// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
// Note the Linux version uses fromStatT to do the copy back,
// but that's not strictly necessary when already in an OS specific module.
func Lstat(path string) (*Stat_t, error) {
func Lstat(path string) (*StatT, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, err
}
return &Stat_t{
return &StatT{
name: fi.Name(),
size: fi.Size(),
mode: fi.Mode(),

View File

@ -2,17 +2,12 @@ package system
import (
"bufio"
"errors"
"io"
"os"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
)
var (
ErrMalformed = errors.New("malformed file")
"github.com/fsouza/go-dockerclient/external/github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a

View File

@ -2,6 +2,7 @@
package system
// ReadMemInfo is not supported on platforms other than linux and windows.
func ReadMemInfo() (*MemInfo, error) {
return nil, ErrNotSupportedPlatform
}

View File

@ -7,14 +7,16 @@ import (
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return syscall.Mknod(path, mode, dev)
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
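A worked example of the bit layout on a unix build (the device numbers are illustrative):
```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	// /dev/sda1 is major 8, minor 1: the low 8 bits hold the low minor
	// bits, the next 12 bits hold the major, and the top bits hold the
	// rest of the minor.
	fmt.Printf("%#x\n", system.Mkdev(8, 1)) // 0x801

	// A minor above 0xff spills its upper bits past the major field:
	// ((0x12345 & 0xfff00) << 12) | (8 << 8) | 0x45 = 0x12300845.
	fmt.Printf("%#x\n", system.Mkdev(8, 0x12345)) // 0x12300845
}
```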

View File

@ -2,10 +2,12 @@
package system
// Mknod is not implemented on Windows.
func Mknod(path string, mode uint32, dev int) error {
return ErrNotSupportedPlatform
}
// Mkdev is not implemented on Windows.
func Mkdev(major int64, minor int64) uint32 {
panic("Mkdev not implemented on Windows.")
}

View File

@ -0,0 +1,8 @@
// +build !windows
package system
// DefaultPathEnv is the unix-style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

View File

@ -0,0 +1,7 @@
// +build windows
package system
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
const DefaultPathEnv = ""

View File

@ -6,9 +6,9 @@ import (
"syscall"
)
// Stat_t type contains status of a file. It contains metadata
// like permission, owner, group, size, etc about a file
type Stat_t struct {
// StatT type contains status of a file. It contains metadata
// like permission, owner, group, size, etc. about a file.
type StatT struct {
mode uint32
uid uint32
gid uint32
@ -17,30 +17,37 @@ type Stat_t struct {
mtim syscall.Timespec
}
func (s Stat_t) Mode() uint32 {
// Mode returns file's permission mode.
func (s StatT) Mode() uint32 {
return s.mode
}
func (s Stat_t) Uid() uint32 {
// UID returns the user ID of the file's owner.
func (s StatT) UID() uint32 {
return s.uid
}
func (s Stat_t) Gid() uint32 {
// GID returns the group ID of the file's owner.
func (s StatT) GID() uint32 {
return s.gid
}
func (s Stat_t) Rdev() uint64 {
// Rdev returns the file's device ID (if it's a special file).
func (s StatT) Rdev() uint64 {
return s.rdev
}
func (s Stat_t) Size() int64 {
// Size returns file's size.
func (s StatT) Size() int64 {
return s.size
}
func (s Stat_t) Mtim() syscall.Timespec {
// Mtim returns file's last modification time.
func (s StatT) Mtim() syscall.Timespec {
return s.mtim
}
func (s Stat_t) GetLastModification() syscall.Timespec {
// GetLastModification returns file's last modification time.
func (s StatT) GetLastModification() syscall.Timespec {
return s.Mtim()
}

View File

@ -5,8 +5,8 @@ import (
)
// fromStatT converts a syscall.Stat_t type to a system.StatT type
func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
return &Stat_t{size: s.Size,
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
@ -18,7 +18,7 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*Stat_t, error) {
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err

View File

@ -5,8 +5,8 @@ import (
)
// fromStatT converts a syscall.Stat_t type to a system.StatT type
func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
return &Stat_t{size: s.Size,
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
@ -14,17 +14,17 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
mtim: s.Mtim}, nil
}
// FromStatT exists only on linux, and loads a system.Stat_t from a
// FromStatT exists only on linux, and loads a system.StatT from a
// syscall.Stat_t.
func FromStatT(s *syscall.Stat_t) (*Stat_t, error) {
func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
// a system.Stat_t type pertaining to that file.
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*Stat_t, error) {
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err

View File

@ -0,0 +1,15 @@
package system
import (
"syscall"
)
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
}

View File

@ -0,0 +1,17 @@
// +build solaris
package system
import (
"syscall"
)
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
}

View File

@ -1,4 +1,4 @@
// +build !linux,!windows,!freebsd
// +build !linux,!windows,!freebsd,!solaris,!openbsd
package system
@ -6,9 +6,9 @@ import (
"syscall"
)
// fromStatT creates a system.Stat_t type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
return &Stat_t{size: s.Size,
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,

View File

@ -7,7 +7,9 @@ import (
"time"
)
type Stat_t struct {
// StatT type contains status of a file. It contains metadata
// like name, permission, size, etc. about a file.
type StatT struct {
name string
size int64
mode os.FileMode
@ -15,22 +17,27 @@ type Stat_t struct {
isDir bool
}
func (s Stat_t) Name() string {
// Name returns file's name.
func (s StatT) Name() string {
return s.name
}
func (s Stat_t) Size() int64 {
// Size returns file's size.
func (s StatT) Size() int64 {
return s.size
}
func (s Stat_t) Mode() os.FileMode {
// Mode returns file's permission mode.
func (s StatT) Mode() os.FileMode {
return s.mode
}
func (s Stat_t) ModTime() time.Time {
// ModTime returns file's last modification time.
func (s StatT) ModTime() time.Time {
return s.modTime
}
func (s Stat_t) IsDir() bool {
// IsDir returns whether file is actually a directory.
func (s StatT) IsDir() bool {
return s.isDir
}

View File

@ -0,0 +1,11 @@
// +build linux freebsd
package system
import "syscall"
// Unmount is a platform-specific helper function to call
// the unmount syscall.
func Unmount(dest string) error {
return syscall.Unmount(dest, 0)
}

View File

@ -0,0 +1,36 @@
package system
import (
"fmt"
"syscall"
)
// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
Version uint32
MajorVersion uint8
MinorVersion uint8
Build uint16
}
// GetOSVersion gets the operating system version on Windows. Note that
// docker.exe must be manifested to get the correct version information.
func GetOSVersion() (OSVersion, error) {
var err error
osv := OSVersion{}
osv.Version, err = syscall.GetVersion()
if err != nil {
return osv, fmt.Errorf("Failed to call GetVersion()")
}
osv.MajorVersion = uint8(osv.Version & 0xFF)
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
osv.Build = uint16(osv.Version >> 16)
return osv, nil
}
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
return nil
}
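A hedged Windows-only sketch of the decoding; the 0x23F00206 example in the comments is arithmetic, not output from this code:
```go
// +build windows

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	osv, err := system.GetOSVersion()
	if err != nil {
		fmt.Println(err)
		return
	}
	// GetVersion packs everything into one uint32: the low byte is the
	// major version, the next byte the minor, and the high 16 bits the
	// build. 0x23F00206, for example, decodes to 6.2 build 9200 (Windows 8).
	fmt.Printf("%d.%d build %d\n", osv.MajorVersion, osv.MinorVersion, osv.Build)
}
```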

View File

@ -6,6 +6,8 @@ import (
"syscall"
)
// Umask sets current process's file mode creation mask to newmask
// and returns oldmask.
func Umask(newmask int) (oldmask int, err error) {
return syscall.Umask(newmask), nil
}

View File

@ -2,6 +2,7 @@
package system
// Umask is not supported on the windows platform.
func Umask(newmask int) (oldmask int, err error) {
// should not be called on cli code path
return 0, ErrNotSupportedPlatform

View File

@ -2,10 +2,7 @@ package system
import "syscall"
// LUtimesNano is not supported on the darwin platform.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@ -5,6 +5,8 @@ import (
"unsafe"
)
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
func LUtimesNano(path string, ts []syscall.Timespec) error {
var _path *byte
_path, err := syscall.BytePtrFromString(path)
@ -18,7 +20,3 @@ func LUtimesNano(path string, ts []syscall.Timespec) error {
return nil
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@ -5,10 +5,12 @@ import (
"unsafe"
)
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
func LUtimesNano(path string, ts []syscall.Timespec) error {
// These are not currently available in syscall
AT_FDCWD := -100
AT_SYMLINK_NOFOLLOW := 0x100
atFdCwd := -100
atSymLinkNoFollow := 0x100
var _path *byte
_path, err := syscall.BytePtrFromString(path)
@ -16,13 +18,9 @@ func LUtimesNano(path string, ts []syscall.Timespec) error {
return err
}
if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
return err
}
return nil
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@ -4,10 +4,7 @@ package system
import "syscall"
// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}

View File

@ -5,7 +5,9 @@ import (
"unsafe"
)
// Returns a nil slice and nil error if the xattr is not set
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// It returns a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
@ -36,6 +38,8 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
var _zero uintptr
// Lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {

View File

@ -2,10 +2,12 @@
package system
// Lgetxattr is not supported on platforms other than linux.
func Lgetxattr(path string, attr string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
}
// Lsetxattr is not supported on platforms other than linux.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return ErrNotSupportedPlatform
}

View File

@ -1,61 +0,0 @@
package volume
// DefaultDriverName is the driver name used for the driver
// implemented in the local package.
const DefaultDriverName string = "local"
// Driver is for creating and removing volumes.
type Driver interface {
// Name returns the name of the volume driver.
Name() string
// Create makes a new volume with the given id.
Create(string) (Volume, error)
// Remove deletes the volume.
Remove(Volume) error
}
// Volume is a place to store data. It is backed by a specific driver, and can be mounted.
type Volume interface {
// Name returns the name of the volume
Name() string
// DriverName returns the name of the driver which owns this volume.
DriverName() string
// Path returns the absolute path to the volume.
Path() string
// Mount mounts the volume and returns the absolute path to
// where it can be consumed.
Mount() (string, error)
// Unmount unmounts the volume when it is no longer in use.
Unmount() error
}
// read-write modes
var rwModes = map[string]bool{
"rw": true,
"rw,Z": true,
"rw,z": true,
"z,rw": true,
"Z,rw": true,
"Z": true,
"z": true,
}
// read-only modes
var roModes = map[string]bool{
"ro": true,
"ro,Z": true,
"ro,z": true,
"z,ro": true,
"Z,ro": true,
}
// ValidateMountMode will make sure the mount mode is valid.
// It returns whether it's a valid mount mode and whether it's read-write.
func ValidateMountMode(mode string) (bool, bool) {
return roModes[mode] || rwModes[mode], rwModes[mode]
}
// ReadWrite tells you if a mode string is a valid read-write mode or not.
func ReadWrite(mode string) bool {
return rwModes[mode]
}
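A sketch against the removed API showing the two return values (valid, read-write); the mode strings come from the tables above:
```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
)

func main() {
	valid, writable := volume.ValidateMountMode("ro,Z")
	fmt.Println(valid, writable) // true false

	valid, writable = volume.ValidateMountMode("rw")
	fmt.Println(valid, writable) // true true

	valid, writable = volume.ValidateMountMode("rx")
	fmt.Println(valid, writable) // false false
}
```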

View File

@ -0,0 +1,67 @@
# Contributing to go-units
Want to hack on go-units? Awesome! Here are instructions to get you started.
go-units is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
### Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,425 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.