Bump fsouza/go-dockerclient godep
2
Godeps/Godeps.json
generated
@@ -192,7 +192,7 @@
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "2f1ad24900b2777139b5becee93eb63a75b00617"
"Rev": "933433faa3e1c0bbc825b251143f8e77affbf797"
},
{
"ImportPath": "github.com/garyburd/redigo/internal",
1
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
@@ -1,4 +1,5 @@
language: go
sudo: false
go:
- 1.3.1
- 1.4
14
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
@@ -9,16 +9,21 @@ Andy Goldstein <andy.goldstein@redhat.com>
|
||||
Ben Marini <ben@remind101.com>
|
||||
Ben McCann <benmccann.com>
|
||||
Brian Lalor <blalor@bravo5.org>
|
||||
Brendan Fosberry <brendan@codeship.com>
|
||||
Burke Libbey <burke@libbey.me>
|
||||
Carlos Diaz-Padron <cpadron@mozilla.com>
|
||||
Cezar Sa Espinola <cezar.sa@corp.globo.com>
|
||||
Cheah Chu Yeow <chuyeow@gmail.com>
|
||||
cheneydeng <cheneydeng@qq.com>
|
||||
CMGS <ilskdw@gmail.com>
|
||||
Craig Jellick <craig@rancher.com>
|
||||
Daniel, Dao Quang Minh <dqminh89@gmail.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Choi <dave.choi@daumkakao.com>
|
||||
David Huie <dahuie@gmail.com>
|
||||
Dawn Chen <dawnchen@google.com>
|
||||
Dinesh Subhraveti <dinesh@gemini-systems.net>
|
||||
Ed <edrocksit@gmail.com>
|
||||
Eric Anderson <anderson@copperegg.com>
|
||||
Ewout Prangsma <ewout@prangsma.net>
|
||||
@@ -27,6 +32,8 @@ Fatih Arslan <ftharsln@gmail.com>
|
||||
Flavia Missi <flaviamissi@gmail.com>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Guillermo Álvarez Fernández <guillermo@cientifico.net>
|
||||
He Simei <hesimei@zju.edu.cn>
|
||||
Ivan Mikushin <i.mikushin@gmail.com>
|
||||
James Bardin <jbardin@litl.com>
|
||||
Jari Kolehmainen <jari.kolehmainen@digia.com>
|
||||
Jason Wilder <jwilder@litl.com>
|
||||
@@ -39,7 +46,8 @@ Kamil Domanski <kamil@domanski.co>
|
||||
Karan Misra <kidoman@gmail.com>
|
||||
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
|
||||
Kyle Allan <kallan357@gmail.com>
|
||||
liron-l <levinlir@gmail.com>
|
||||
Liron Levin <levinlir@gmail.com>
|
||||
Liu Peng <vslene@gmail.com>
|
||||
Lucas Clemente <lucas@clemente.io>
|
||||
Lucas Weiblen <lucasweiblen@gmail.com>
|
||||
Mantas Matelis <mmatelis@coursera.org>
|
||||
@@ -50,6 +58,8 @@ Mike Dillon <mike.dillon@synctree.com>
|
||||
Mrunal Patel <mrunalp@gmail.com>
|
||||
Nick Ethier <ncethier@gmail.com>
|
||||
Omeid Matten <public@omeid.me>
|
||||
Orivej Desh <orivej@gmx.fr>
|
||||
Paul Bellamy <paul.a.bellamy@gmail.com>
|
||||
Paul Morie <pmorie@gmail.com>
|
||||
Paul Weil <pweil@redhat.com>
|
||||
Peter Edge <peter.edge@gmail.com>
|
||||
@@ -66,9 +76,11 @@ Skolos <skolos@gopherlab.com>
|
||||
Soulou <leo@unbekandt.eu>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Summer Mousa <smousa@zenoss.com>
|
||||
Sunjin Lee <styner32@gmail.com>
|
||||
Tarsis Azevedo <tarsis@corp.globo.com>
|
||||
Tim Schindler <tim@catalyst-zero.com>
|
||||
Tobi Knaup <tobi@mesosphere.io>
|
||||
ttyh061 <ttyh061@gmail.com>
|
||||
Victor Marmol <vmarmol@google.com>
|
||||
Vincenzo Prignano <vincenzo.prignano@gmail.com>
|
||||
Wiliam Souza <wiliamsouza83@gmail.com>
|
||||
|
35
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile
generated
vendored
@@ -10,43 +10,38 @@
|
||||
cov \
|
||||
clean
|
||||
|
||||
SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
|
||||
PKGS = ./. ./testing
|
||||
|
||||
all: test
|
||||
|
||||
vendor:
|
||||
go get -v github.com/mjibson/party
|
||||
party -d vendor -c -u
|
||||
@ go get -v github.com/mjibson/party
|
||||
party -d external -c -u
|
||||
|
||||
lint:
|
||||
go get -v github.com/golang/lint/golint
|
||||
for file in $(shell git ls-files '*.go' | grep -v '^vendor/'); do \
|
||||
golint $$file; \
|
||||
done
|
||||
@ go get -v github.com/golang/lint/golint
|
||||
$(foreach file,$(SRCS),golint $(file) || exit;)
|
||||
|
||||
vet:
|
||||
go get -v golang.org/x/tools/cmd/vet
|
||||
go vet ./...
|
||||
@-go get -v golang.org/x/tools/cmd/vet
|
||||
$(foreach pkg,$(PKGS),go vet $(pkg);)
|
||||
|
||||
fmt:
|
||||
gofmt -w $(shell git ls-files '*.go' | grep -v '^vendor/')
|
||||
gofmt -w $(SRCS)
|
||||
|
||||
fmtcheck:
|
||||
for file in $(shell git ls-files '*.go' | grep -v '^vendor/'); do \
|
||||
gofmt $$file | diff -u $$file -; \
|
||||
if [ -n "$$(gofmt $$file | diff -u $$file -)" ]; then\
|
||||
exit 1; \
|
||||
fi; \
|
||||
done
|
||||
$(foreach file,$(SRCS),gofmt $(file) | diff -u $(file) - || exit;)
|
||||
|
||||
pretest: lint vet fmtcheck
|
||||
|
||||
test: pretest
|
||||
go test
|
||||
go test ./testing
|
||||
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
|
||||
|
||||
cov:
|
||||
go get -v github.com/axw/gocov/gocov
|
||||
go get golang.org/x/tools/cmd/cover
|
||||
@ go get -v github.com/axw/gocov/gocov
|
||||
@ go get golang.org/x/tools/cmd/cover
|
||||
gocov test | gocov report
|
||||
|
||||
clean:
|
||||
go clean ./...
|
||||
$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
|
||||
|
27
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
generated
vendored
@@ -35,11 +35,9 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
## Using with Boot2Docker
|
||||
## Using with TLS
|
||||
|
||||
Boot2Docker runs Docker with TLS enabled. In order to instantiate the client you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
|
||||
|
||||
For more details about TLS support in Boot2Docker, please refer to [TLS support](https://github.com/boot2docker/boot2docker#tls-support) on Boot2Docker's readme.
|
||||
In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
|
||||
|
||||
```go
|
||||
package main
|
||||
@@ -61,6 +59,27 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
|
||||
`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client, _ := docker.NewClientFromEnv()
|
||||
// use client
|
||||
}
|
||||
```
|
||||
|
||||
See the documentation for more details.
|
||||
|
||||
## Developing
|
||||
|
||||
All development commands can be seen in the [Makefile](Makefile).
|
||||
|
56
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go
generated
vendored
@@ -5,8 +5,10 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
@@ -38,10 +40,16 @@ type dockerConfig struct {
|
||||
// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
|
||||
// ~/.dockercfg file.
|
||||
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
||||
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
||||
r, err := os.Open(p)
|
||||
var r io.Reader
|
||||
var err error
|
||||
p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return NewAuthConfigurations(r)
|
||||
}
|
||||
@@ -50,17 +58,36 @@ func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
||||
// same format as the .dockercfg file.
|
||||
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
|
||||
var auth *AuthConfigurations
|
||||
var confs map[string]dockerConfig
|
||||
if err := json.NewDecoder(r).Decode(&confs); err != nil {
|
||||
confs, err := parseDockerConfig(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth, err := authConfigs(confs)
|
||||
auth, err = authConfigs(confs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
byteData := buf.Bytes()
|
||||
|
||||
var confsWrapper map[string]map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
|
||||
if confs, ok := confsWrapper["auths"]; ok {
|
||||
return confs, nil
|
||||
}
|
||||
}
|
||||
|
||||
var confs map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return confs, nil
|
||||
}
|
||||
|
||||
// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
|
||||
func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
||||
c := &AuthConfigurations{
|
||||
@@ -81,3 +108,20 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// AuthCheck validates the given credentials. It returns nil if successful.
|
||||
//
|
||||
// See https://goo.gl/vPoEfJ for more details.
|
||||
func (c *Client) AuthCheck(conf *AuthConfiguration) error {
|
||||
if conf == nil {
|
||||
return fmt.Errorf("conf is nil")
|
||||
}
|
||||
body, statusCode, err := c.do("POST", "/auth", doOptions{data: conf})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if statusCode > 400 {
|
||||
return fmt.Errorf("auth error (%d): %s", statusCode, body)
|
||||
}
|
||||
return nil
|
||||
}
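For reference, a minimal usage sketch of the credential loading and the new AuthCheck call from the hunks above; the `docker` import alias matches the vendored README, while the logging and iteration are illustrative, not part of this diff:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Loads ~/.docker/config.json first and falls back to ~/.dockercfg,
	// as NewAuthConfigurationsFromDockerCfg now does in the hunk above.
	auths, err := docker.NewAuthConfigurationsFromDockerCfg()
	if err != nil {
		log.Fatal(err)
	}
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	for registry, conf := range auths.Configs {
		// AuthCheck returns nil when the daemon accepts the credentials.
		if err := client.AuthCheck(&conf); err != nil {
			log.Printf("auth failed for %s: %v", registry, err)
		}
	}
}
```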
|
||||
|
44
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go
generated
vendored
@@ -7,11 +7,12 @@ package docker
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAuthConfig(t *testing.T) {
|
||||
func TestAuthLegacyConfig(t *testing.T) {
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
|
||||
read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth))
|
||||
ac, err := NewAuthConfigurations(read)
|
||||
@@ -35,3 +36,44 @@ func TestAuthConfig(t *testing.T) {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthConfig(t *testing.T) {
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
|
||||
read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth))
|
||||
ac, err := NewAuthConfigurations(read)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
c, ok := ac.Configs["docker.io"]
|
||||
if !ok {
|
||||
t.Error("NewAuthConfigurations: Expected Configs to contain docker.io")
|
||||
}
|
||||
if got, want := c.Email, "user@example.com"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Username, "user"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Password, "pass"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.ServerAddress, "docker.io"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthCheck(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
if err := client.AuthCheck(nil); err == nil {
|
||||
t.Fatalf("expected error on nil auth config")
|
||||
}
|
||||
// test good auth
|
||||
if err := client.AuthCheck(&AuthConfiguration{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
*fakeRT = FakeRoundTripper{status: http.StatusUnauthorized}
|
||||
if err := client.AuthCheck(&AuthConfiguration{}); err == nil {
|
||||
t.Fatal("expected failure from unauthorized auth")
|
||||
}
|
||||
}
|
||||
|
2
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go
generated
vendored
@@ -9,7 +9,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
func TestBuildImageMultipleContextsError(t *testing.T) {
|
||||
|
243
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
generated
vendored
@@ -8,6 +8,7 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
@@ -20,11 +21,17 @@ import (
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
||||
"time"
|
||||
)
|
||||
|
||||
const userAgent = "go-dockerclient"
|
||||
@@ -153,6 +160,18 @@ func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file). It will use the latest remote API version available in the server.
|
||||
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
|
||||
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
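A hedged usage sketch for the NewTLSClientFromBytes entry point added above: the caller supplies PEM contents directly instead of file paths. The endpoint and file names below are illustrative:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Read the PEM material ourselves; NewTLSClientFromBytes takes raw
	// bytes, unlike NewTLSClient, which takes file paths.
	cert, _ := ioutil.ReadFile("cert.pem") // error handling elided for brevity
	key, _ := ioutil.ReadFile("key.pem")
	ca, _ := ioutil.ReadFile("ca.pem")
	client, err := docker.NewTLSClientFromBytes("tcp://192.168.99.100:2376", cert, key, ca)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use client
}
```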
|
||||
|
||||
// NewVersionedClient returns a Client instance ready for communication with
|
||||
// the given server endpoint, using a specific remote API version.
|
||||
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
|
||||
@@ -184,6 +203,65 @@ func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString str
|
||||
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates, using a specific remote API version.
|
||||
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||
certPEMBlock, err := ioutil.ReadFile(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyPEMBlock, err := ioutil.ReadFile(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caPEMCert, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
|
||||
}
|
||||
|
||||
// NewClientFromEnv returns a Client instance ready for communication created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewClientFromEnv() (*Client, error) {
|
||||
client, err := NewVersionedClientFromEnv("")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
|
||||
// and using a specific remote API version.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
||||
dockerEnv, err := getDockerEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dockerHost := dockerEnv.dockerHost
|
||||
if dockerEnv.dockerTLSVerify {
|
||||
parts := strings.SplitN(dockerHost, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
||||
}
|
||||
dockerHost = fmt.Sprintf("https://%s", parts[1])
|
||||
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
||||
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
||||
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
||||
return NewVersionedTLSClient(dockerHost, cert, key, ca, apiVersionString)
|
||||
}
|
||||
return NewVersionedClient(dockerHost, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file), using a specific remote API version.
|
||||
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
|
||||
u, err := parseEndpoint(endpoint, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -195,23 +273,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if cert == "" || key == "" {
|
||||
return nil, errors.New("Both cert and key path are required")
|
||||
if certPEMBlock == nil || keyPEMBlock == nil {
|
||||
return nil, errors.New("Both cert and key are required")
|
||||
}
|
||||
tlsCert, err := tls.LoadX509KeyPair(cert, key)
|
||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
|
||||
if ca == "" {
|
||||
if caPEMCert == nil {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
} else {
|
||||
cert, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caPool := x509.NewCertPool()
|
||||
if !caPool.AppendCertsFromPEM(cert) {
|
||||
if !caPool.AppendCertsFromPEM(caPEMCert) {
|
||||
return nil, errors.New("Could not add RootCA pem")
|
||||
}
|
||||
tlsConfig.RootCAs = caPool
|
||||
@@ -272,13 +346,15 @@ func (c *Client) getServerAPIVersionString() (version string, err error) {
|
||||
if status != http.StatusOK {
|
||||
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status)
|
||||
}
|
||||
var versionResponse map[string]string
|
||||
var versionResponse map[string]interface{}
|
||||
err = json.Unmarshal(body, &versionResponse)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
version = versionResponse["ApiVersion"]
|
||||
return version, nil
|
||||
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
|
||||
return version, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type doOptions struct {
|
||||
@@ -315,17 +391,18 @@ func (c *Client) do(method, path string, doOptions doOptions) ([]byte, int, erro
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if protocol == "unix" {
|
||||
dial, err := net.Dial(protocol, address)
|
||||
var dial net.Conn
|
||||
dial, err = net.Dial(protocol, address)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
defer dial.Close()
|
||||
clientconn := httputil.NewClientConn(dial, nil)
|
||||
resp, err = clientconn.Do(req)
|
||||
breader := bufio.NewReader(dial)
|
||||
err = req.Write(dial)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
defer clientconn.Close()
|
||||
resp, err = http.ReadResponse(breader, req)
|
||||
} else {
|
||||
resp, err = c.HTTPClient.Do(req)
|
||||
}
|
||||
@@ -349,10 +426,13 @@ func (c *Client) do(method, path string, doOptions doOptions) ([]byte, int, erro
|
||||
type streamOptions struct {
|
||||
setRawTerminal bool
|
||||
rawJSONStream bool
|
||||
useJSONDecoder bool
|
||||
headers map[string]string
|
||||
in io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
// timeout is the initial connection timeout
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
|
||||
@@ -390,17 +470,35 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clientconn := httputil.NewClientConn(dial, nil)
|
||||
resp, err = clientconn.Do(req)
|
||||
defer clientconn.Close()
|
||||
} else {
|
||||
resp, err = c.HTTPClient.Do(req)
|
||||
}
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
defer dial.Close()
|
||||
breader := bufio.NewReader(dial)
|
||||
err = req.Write(dial)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadResponse may hang if the server does not reply
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Now().Add(streamOptions.timeout))
|
||||
}
|
||||
|
||||
if resp, err = http.ReadResponse(breader, req); err != nil {
|
||||
// Cancel timeout for future I/O operations
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Time{})
|
||||
}
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if resp, err = c.HTTPClient.Do(req); err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
@@ -410,7 +508,7 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
|
||||
}
|
||||
return newError(resp.StatusCode, body)
|
||||
}
|
||||
if resp.Header.Get("Content-Type") == "application/json" {
|
||||
if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
|
||||
// if we want to get raw json stream, just copy it back to output
|
||||
// without decoding it
|
||||
if streamOptions.rawJSONStream {
|
||||
@@ -653,28 +751,10 @@ func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
|
||||
if tls {
|
||||
u.Scheme = "https"
|
||||
}
|
||||
if u.Scheme == "tcp" {
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
if e, ok := err.(*net.AddrError); ok {
|
||||
if e.Err == "missing port in address" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
|
||||
number, err := strconv.ParseInt(port, 10, 64)
|
||||
if err == nil && number == 2376 {
|
||||
u.Scheme = "https"
|
||||
} else {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" {
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
if u.Scheme != "unix" {
|
||||
switch u.Scheme {
|
||||
case "unix":
|
||||
return u, nil
|
||||
case "http", "https", "tcp":
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
if e, ok := err.(*net.AddrError); ok {
|
||||
@@ -686,10 +766,67 @@ func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
|
||||
}
|
||||
number, err := strconv.ParseInt(port, 10, 64)
|
||||
if err == nil && number > 0 && number < 65536 {
|
||||
if u.Scheme == "tcp" {
|
||||
if number == 2376 {
|
||||
u.Scheme = "https"
|
||||
} else {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
} else {
|
||||
return u, nil // we don't need port when using a unix socket
|
||||
return nil, ErrInvalidEndpoint
|
||||
default:
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
|
||||
type dockerEnv struct {
|
||||
dockerHost string
|
||||
dockerTLSVerify bool
|
||||
dockerCertPath string
|
||||
}
|
||||
|
||||
func getDockerEnv() (*dockerEnv, error) {
|
||||
dockerHost := os.Getenv("DOCKER_HOST")
|
||||
var err error
|
||||
if dockerHost == "" {
|
||||
dockerHost, err = getDefaultDockerHost()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
|
||||
var dockerCertPath string
|
||||
if dockerTLSVerify {
|
||||
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
|
||||
if dockerCertPath == "" {
|
||||
home := homedir.Get()
|
||||
if home == "" {
|
||||
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
|
||||
}
|
||||
dockerCertPath = filepath.Join(home, ".docker")
|
||||
dockerCertPath, err = filepath.Abs(dockerCertPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return &dockerEnv{
|
||||
dockerHost: dockerHost,
|
||||
dockerTLSVerify: dockerTLSVerify,
|
||||
dockerCertPath: dockerCertPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getDefaultDockerHost() (string, error) {
|
||||
var defaultHost string
|
||||
if runtime.GOOS != "windows" {
|
||||
// If we do not have a host, default to unix socket
|
||||
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
|
||||
} else {
|
||||
// If we do not have a host, default to TCP socket on Windows
|
||||
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
|
||||
}
|
||||
return opts.ValidateHost(defaultHost)
|
||||
}
|
||||
|
52
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go
generated
vendored
@@ -7,12 +7,14 @@ package docker
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewAPIClient(t *testing.T) {
|
||||
@@ -324,6 +326,56 @@ func TestPingFailingWrongStatus(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingErrorWithUnixSocket(t *testing.T) {
|
||||
go func() {
|
||||
li, err := net.Listen("unix", "/tmp/echo.sock")
|
||||
defer li.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to get listner, but failed: %#v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fd, err := li.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to accept connection, but failed: %#v", err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := make([]byte, 512)
|
||||
nr, err := fd.Read(buf)
|
||||
|
||||
// Create an invalid response message to trigger an error
|
||||
data := buf[0:nr]
|
||||
for i := 0; i < 10; i++ {
|
||||
data[i] = 63
|
||||
}
|
||||
|
||||
_, err = fd.Write(data)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to write to socket, but failed: %#v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}()
|
||||
|
||||
// Wait for unix socket to listen
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
endpoint := "unix:///tmp/echo.sock"
|
||||
u, _ := parseEndpoint(endpoint, false)
|
||||
client := Client{
|
||||
HTTPClient: http.DefaultClient,
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
SkipServerVersionCheck: true,
|
||||
}
|
||||
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
type FakeRoundTripper struct {
|
||||
message string
|
||||
status int
|
||||
|
103
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
generated
vendored
@@ -7,6 +7,7 @@ package docker
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -16,6 +17,10 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrContainerAlreadyExists is the error returned by CreateContainer when the
|
||||
// container already exists.
|
||||
var ErrContainerAlreadyExists = errors.New("container already exists")
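A brief sketch of how callers might check for the new ErrContainerAlreadyExists sentinel, which CreateContainer returns on a 409 Conflict (see the hunk further down); the container name and image are illustrative:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	opts := docker.CreateContainerOptions{
		Name:   "web",
		Config: &docker.Config{Image: "nginx"},
	}
	if _, err := client.CreateContainer(opts); err == docker.ErrContainerAlreadyExists {
		// The daemon answered 409 Conflict; CreateContainer now maps that
		// status to ErrContainerAlreadyExists instead of a generic error.
		log.Printf("container %q already exists", opts.Name)
	} else if err != nil {
		log.Fatal(err)
	}
}
```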
|
||||
|
||||
// ListContainersOptions specify parameters to the ListContainers function.
|
||||
//
|
||||
// See http://goo.gl/6Y4Gz7 for more details.
|
||||
@@ -125,6 +130,7 @@ type PortMapping map[string]string
|
||||
type NetworkSettings struct {
|
||||
IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
|
||||
IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
|
||||
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
|
||||
Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
|
||||
Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
|
||||
PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
|
||||
@@ -186,14 +192,14 @@ type Config struct {
|
||||
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
|
||||
StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
|
||||
Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
|
||||
Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
|
||||
Cmd []string `json:"Cmd" yaml:"Cmd"`
|
||||
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
|
||||
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
|
||||
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
|
||||
VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
|
||||
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
|
||||
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
|
||||
Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty"`
|
||||
Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
|
||||
NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
|
||||
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
|
||||
OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
|
||||
@@ -206,6 +212,14 @@ type LogConfig struct {
|
||||
Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
|
||||
}
|
||||
|
||||
// ULimit defines system-wide resource limitations
|
||||
// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users.
|
||||
type ULimit struct {
|
||||
Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
|
||||
Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
|
||||
Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
|
||||
}
|
||||
|
||||
// SwarmNode contains information about which Swarm node the container is on
|
||||
type SwarmNode struct {
|
||||
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
|
||||
@@ -239,6 +253,7 @@ type Container struct {
|
||||
ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
|
||||
HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
|
||||
HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
|
||||
LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"`
|
||||
Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
|
||||
Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
|
||||
|
||||
@@ -247,6 +262,8 @@ type Container struct {
|
||||
HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
|
||||
ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
|
||||
|
||||
RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
|
||||
|
||||
AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
|
||||
}
|
||||
|
||||
@@ -314,8 +331,8 @@ func (c *Client) ContainerChanges(id string) ([]Change, error) {
|
||||
// See http://goo.gl/2xxQQK for more details.
|
||||
type CreateContainerOptions struct {
|
||||
Name string
|
||||
Config *Config `qs:"-"`
|
||||
HostConfig *HostConfig
|
||||
Config *Config `qs:"-"`
|
||||
HostConfig *HostConfig `qs:"-"`
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container, returning the container instance,
|
||||
@@ -341,6 +358,9 @@ func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error
|
||||
if status == http.StatusNotFound {
|
||||
return nil, ErrNoSuchImage
|
||||
}
|
||||
if status == http.StatusConflict {
|
||||
return nil, ErrContainerAlreadyExists
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -420,6 +440,7 @@ type HostConfig struct {
|
||||
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
|
||||
IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
|
||||
PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
|
||||
UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"`
|
||||
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
|
||||
Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
|
||||
LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
|
||||
@@ -430,6 +451,9 @@ type HostConfig struct {
|
||||
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
|
||||
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
|
||||
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
|
||||
CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
|
||||
CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
|
||||
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
|
||||
}
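A small sketch of passing the new Ulimits field through HostConfig when starting a container; the values mirror the inspect fixture in the test changes later in this diff, and the container ID is illustrative:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	hostConfig := &docker.HostConfig{
		// The new Ulimits field carries per-container resource limits.
		Ulimits: []docker.ULimit{{Name: "nofile", Soft: 1024, Hard: 2048}},
	}
	if err := client.StartContainer("my-container", hostConfig); err != nil {
		log.Fatal(err)
	}
}
```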
|
||||
|
||||
// StartContainer starts a container, returning an error in case of failure.
|
||||
@@ -610,26 +634,30 @@ type Stats struct {
|
||||
IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"`
|
||||
SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"`
|
||||
} `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"`
|
||||
CPUStats struct {
|
||||
CPUUsage struct {
|
||||
PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
|
||||
UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
|
||||
TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
|
||||
UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
|
||||
} `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
|
||||
SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
|
||||
ThrottlingData struct {
|
||||
Periods uint64 `json:"periods,omitempty"`
|
||||
ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
|
||||
ThrottledTime uint64 `json:"throttled_time,omitempty"`
|
||||
} `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
|
||||
} `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
|
||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
|
||||
}
|
||||
|
||||
// CPUStats is a stats entry for cpu stats
|
||||
type CPUStats struct {
|
||||
CPUUsage struct {
|
||||
PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
|
||||
UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
|
||||
TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
|
||||
UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
|
||||
} `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
|
||||
SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
|
||||
ThrottlingData struct {
|
||||
Periods uint64 `json:"periods,omitempty"`
|
||||
ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
|
||||
ThrottledTime uint64 `json:"throttled_time,omitempty"`
|
||||
} `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
|
||||
}
|
||||
|
||||
// BlkioStatsEntry is a stats entry for blkio_stats
|
||||
type BlkioStatsEntry struct {
|
||||
Major uint64 `json:"major,omitempty" yaml:"major,omitempty"`
|
||||
Minor uint64 `json:"major,omitempty" yaml:"major,omitempty"`
|
||||
Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"`
|
||||
Op string `json:"op,omitempty" yaml:"op,omitempty"`
|
||||
Value uint64 `json:"value,omitempty" yaml:"value,omitempty"`
|
||||
}
|
||||
@@ -638,8 +666,13 @@ type BlkioStatsEntry struct {
|
||||
//
|
||||
// See http://goo.gl/DFMiYD for more details.
|
||||
type StatsOptions struct {
|
||||
ID string
|
||||
Stats chan<- *Stats
|
||||
ID string
|
||||
Stats chan<- *Stats
|
||||
Stream bool
|
||||
// A flag that enables stopping the stats operation
|
||||
Done <-chan bool
|
||||
// Initial connection timeout
|
||||
Timeout time.Duration
|
||||
}
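A hedged sketch of driving Stats with the extended options (Stream, Done and Timeout are the fields added above); the container ID and the one-sample cutoff are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	statsC := make(chan *docker.Stats)
	done := make(chan bool)
	go func() {
		// Stats blocks until the stream ends or Done is signaled; it closes
		// statsC when it returns.
		err := client.Stats(docker.StatsOptions{
			ID:      "my-container",
			Stats:   statsC,
			Stream:  true,
			Done:    done,
			Timeout: 5 * time.Second, // initial connection timeout
		})
		if err != nil {
			log.Print(err)
		}
	}()
	first := true
	for s := range statsC {
		log.Printf("cpu total usage: %d", s.CPUStats.CPUUsage.TotalUsage)
		if first {
			close(done) // ask Stats to stop after the first sample
			first = false
		}
	}
}
```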
|
||||
|
||||
// Stats sends container statistics for the given container to the given channel.
|
||||
@@ -647,7 +680,7 @@ type StatsOptions struct {
|
||||
// This function is blocking, similar to a streaming call for logs, and should be run
|
||||
// on a separate goroutine from the caller. Note that this function will block until
|
||||
// the given container is removed, not just exited. When finished, this function
|
||||
// will close the given channel.
|
||||
// will close the given channel. Alternatively, the function can be stopped by signaling on the Done channel
|
||||
//
|
||||
// See http://goo.gl/DFMiYD for more details.
|
||||
func (c *Client) Stats(opts StatsOptions) (retErr error) {
|
||||
@@ -656,18 +689,27 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
|
||||
|
||||
defer func() {
|
||||
close(opts.Stats)
|
||||
if err := <-errC; err != nil && retErr == nil {
|
||||
retErr = err
|
||||
|
||||
select {
|
||||
case err := <-errC:
|
||||
if err != nil && retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
default:
|
||||
// No errors
|
||||
}
|
||||
|
||||
if err := readCloser.Close(); err != nil && retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
err := c.stream("GET", fmt.Sprintf("/containers/%s/stats", opts.ID), streamOptions{
|
||||
rawJSONStream: true,
|
||||
stdout: writeCloser,
|
||||
err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
|
||||
rawJSONStream: true,
|
||||
useJSONDecoder: true,
|
||||
stdout: writeCloser,
|
||||
timeout: opts.Timeout,
|
||||
})
|
||||
if err != nil {
|
||||
dockerError, ok := err.(*Error)
|
||||
@@ -692,6 +734,12 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
|
||||
}
|
||||
opts.Stats <- stats
|
||||
stats = new(Stats)
|
||||
select {
|
||||
case <-opts.Done:
|
||||
readCloser.Close()
|
||||
default:
|
||||
// Continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -901,6 +949,7 @@ type LogsOptions struct {
|
||||
Follow bool
|
||||
Stdout bool
|
||||
Stderr bool
|
||||
Since int64
|
||||
Timestamps bool
|
||||
Tail string
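A short sketch of the new Since field on LogsOptions, which asks the daemon for log lines newer than a Unix timestamp. The other LogsOptions fields used here (Container, OutputStream) are not shown in this hunk and the ten-minute window is illustrative:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	err = client.Logs(docker.LogsOptions{
		Container:    "my-container",
		OutputStream: os.Stdout,
		Stdout:       true,
		Since:        time.Now().Add(-10 * time.Minute).Unix(), // new field
		Tail:         "all",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```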
|
||||
|
||||
|
84
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go
generated
vendored
@@ -8,6 +8,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -182,6 +183,9 @@ func TestInspectContainer(t *testing.T) {
|
||||
"VolumesFrom": "",
|
||||
"SecurityOpt": [
|
||||
"label:user:USER"
|
||||
],
|
||||
"Ulimits": [
|
||||
{ "Name": "nofile", "Soft": 1024, "Hard": 2048 }
|
||||
]
|
||||
},
|
||||
"State": {
|
||||
@@ -473,6 +477,18 @@ func TestCreateContainerImageNotFound(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateContainerDuplicateName(t *testing.T) {
|
||||
client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict})
|
||||
config := Config{AttachStdout: true, AttachStdin: true}
|
||||
container, err := client.CreateContainer(CreateContainerOptions{Config: &config})
|
||||
if container != nil {
|
||||
t.Errorf("CreateContainer: expected <nil> container, got %#v.", container)
|
||||
}
|
||||
if err != ErrContainerAlreadyExists {
|
||||
t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateContainerWithHostConfig(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
@@ -1347,7 +1363,7 @@ func TestExportContainer(t *testing.T) {
|
||||
|
||||
func TestExportContainerViaUnixSocket(t *testing.T) {
|
||||
if runtime.GOOS != "darwin" {
|
||||
t.Skip("skipping test on %q", runtime.GOOS)
|
||||
t.Skip(fmt.Sprintf("skipping test on %s", runtime.GOOS))
|
||||
}
|
||||
content := "exported container tar content"
|
||||
var buf []byte
|
||||
@@ -1570,6 +1586,38 @@ func TestTopContainerWithPsArgs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatsTimeout(t *testing.T) {
|
||||
|
||||
l, err := net.Listen("unix", "/tmp/docker_test.sock")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
received := false
|
||||
defer l.Close()
|
||||
go func() {
|
||||
l.Accept()
|
||||
received = true
|
||||
time.Sleep(time.Millisecond * 250)
|
||||
}()
|
||||
client, _ := NewClient("unix:///tmp/docker_test.sock")
|
||||
client.SkipServerVersionCheck = true
|
||||
errC := make(chan error, 1)
|
||||
statsC := make(chan *Stats)
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
errC <- client.Stats(StatsOptions{"c", statsC, true, done, time.Millisecond * 100})
|
||||
close(errC)
|
||||
}()
|
||||
err = <-errC
|
||||
e, ok := err.(net.Error)
|
||||
if !ok || !e.Timeout() {
|
||||
t.Error("Failed to receive timeout exception")
|
||||
}
|
||||
if !received {
|
||||
t.Fatal("Failed to receive message")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStats(t *testing.T) {
|
||||
jsonStats1 := `{
|
||||
"read" : "2015-01-08T22:57:31.547920715Z",
|
||||
@@ -1669,6 +1717,20 @@ func TestStats(t *testing.T) {
|
||||
"usage_in_kernelmode" : 20000000
|
||||
},
|
||||
"system_cpu_usage" : 20091722000000000
|
||||
},
|
||||
"precpu_stats" : {
|
||||
"cpu_usage" : {
|
||||
"percpu_usage" : [
|
||||
16970827,
|
||||
1839451,
|
||||
7107380,
|
||||
10571290
|
||||
],
|
||||
"usage_in_usermode" : 10000000,
|
||||
"total_usage" : 36488948,
|
||||
"usage_in_kernelmode" : 20000000
|
||||
},
|
||||
"system_cpu_usage" : 20091722000000000
|
||||
}
|
||||
}`
|
||||
// 1 second later, cache is 100
|
||||
@@ -1769,6 +1831,20 @@ func TestStats(t *testing.T) {
|
||||
"usage_in_kernelmode" : 20000000
|
||||
},
|
||||
"system_cpu_usage" : 20091722000000000
|
||||
},
|
||||
"precpu_stats" : {
|
||||
"cpu_usage" : {
|
||||
"percpu_usage" : [
|
||||
16970827,
|
||||
1839451,
|
||||
7107380,
|
||||
10571290
|
||||
],
|
||||
"usage_in_usermode" : 10000000,
|
||||
"total_usage" : 36488948,
|
||||
"usage_in_kernelmode" : 20000000
|
||||
},
|
||||
"system_cpu_usage" : 20091722000000000
|
||||
}
|
||||
}`
|
||||
var expected1 Stats
|
||||
@@ -1795,8 +1871,9 @@ func TestStats(t *testing.T) {
|
||||
client.SkipServerVersionCheck = true
|
||||
errC := make(chan error, 1)
|
||||
statsC := make(chan *Stats)
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
errC <- client.Stats(StatsOptions{id, statsC})
|
||||
errC <- client.Stats(StatsOptions{id, statsC, true, done, 0})
|
||||
close(errC)
|
||||
}()
|
||||
var resultStats []*Stats
|
||||
@@ -1832,7 +1909,8 @@ func TestStats(t *testing.T) {
|
||||
func TestStatsContainerNotFound(t *testing.T) {
|
||||
client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
|
||||
statsC := make(chan *Stats)
|
||||
err := client.Stats(StatsOptions{"abef348", statsC})
|
||||
done := make(chan bool)
|
||||
err := client.Stats(StatsOptions{"abef348", statsC, true, done, 0})
|
||||
expected := &NoSuchContainer{ID: "abef348"}
|
||||
if !reflect.DeepEqual(err, expected) {
|
||||
t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err)
|
||||
|
28
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
generated
vendored
@@ -82,10 +82,7 @@ func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
|
||||
return err
|
||||
}
|
||||
if len(c.eventMonitor.listeners) == 0 {
|
||||
err = c.eventMonitor.disableEventMonitoring()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.eventMonitor.disableEventMonitoring()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -118,8 +115,6 @@ func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvent
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) closeListeners() {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
for _, l := range eventState.listeners {
|
||||
close(l)
|
||||
eventState.Add(-1)
|
||||
@@ -151,9 +146,13 @@ func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) disableEventMonitoring() error {
|
||||
eventState.Wait()
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
|
||||
eventState.closeListeners()
|
||||
|
||||
eventState.Wait()
|
||||
|
||||
if eventState.enabled {
|
||||
eventState.enabled = false
|
||||
close(eventState.C)
|
||||
@@ -168,7 +167,9 @@ func (eventState *eventMonitoringState) monitorEvents(c *Client) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
if err = eventState.connectWithRetry(c); err != nil {
|
||||
eventState.terminate()
|
||||
// terminate if connect failed
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
}
|
||||
for eventState.isEnabled() {
|
||||
timeout := time.After(100 * time.Millisecond)
|
||||
@@ -178,15 +179,14 @@ func (eventState *eventMonitoringState) monitorEvents(c *Client) {
|
||||
return
|
||||
}
|
||||
if ev == EOFEvent {
|
||||
eventState.closeListeners()
|
||||
eventState.terminate()
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
}
|
||||
eventState.updateLastSeen(ev)
|
||||
go eventState.sendEvent(ev)
|
||||
case err = <-eventState.errC:
|
||||
if err == ErrNoListeners {
|
||||
eventState.terminate()
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
} else if err != nil {
|
||||
defer func() { go eventState.monitorEvents(c) }()
|
||||
@@ -226,7 +226,7 @@ func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
|
||||
defer eventState.RUnlock()
|
||||
eventState.Add(1)
|
||||
defer eventState.Done()
|
||||
if eventState.isEnabled() {
|
||||
if eventState.enabled {
|
||||
if len(eventState.listeners) == 0 {
|
||||
eventState.errC <- ErrNoListeners
|
||||
return
|
||||
@@ -246,10 +246,6 @@ func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
|
||||
}
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) terminate() {
|
||||
eventState.disableEventMonitoring()
|
||||
}
|
||||
|
||||
func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
|
||||
uri := "/events"
|
||||
if startTime != 0 {
|
||||
|
1
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
generated
vendored
@@ -25,6 +25,7 @@ type CreateExecOptions struct {
|
||||
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
|
||||
Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
|
||||
Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
|
||||
User string `json:"User,omitempty" yaml:"User,omitempty"`
|
||||
}
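A short sketch of the new User field on CreateExecOptions; the container ID, command and user name are illustrative:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	exec, err := client.CreateExec(docker.CreateExecOptions{
		Container: "my-container",
		Cmd:       []string{"touch", "/tmp/file"},
		User:      "a-user", // new field: run the exec'd command as this user
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created exec %s", exec.ID)
}
```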
|
||||
|
||||
// StartExecOptions specify parameters to the StartExecContainer function.
|
||||
|
1
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
generated
vendored
@@ -31,6 +31,7 @@ func TestExecCreate(t *testing.T) {
|
||||
AttachStderr: false,
|
||||
Tty: false,
|
||||
Cmd: []string{"touch", "/tmp/file"},
|
||||
User: "a-user",
|
||||
}
|
||||
execObj, err := client.CreateExec(config)
|
||||
if err != nil {
|
||||
|
26
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# (Unreleased)
|
||||
|
||||
logrus/core: improve performance of text formatter by 40%
|
||||
logrus/core: expose `LevelHooks` type
|
||||
|
||||
# 0.8.2
|
||||
|
||||
logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
logrus: defaults to stderr instead of stdout
|
||||
hooks/sentry: add special field for `*http.Request`
|
||||
formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
formatter/text: Add configuration option for time format (#158)
|
@@ -32,7 +32,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
|
||||
With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
@@ -183,7 +183,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
"github.com/Sirupsen/logrus/hooks/syslog"
|
||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
@@ -206,11 +206,17 @@ func init() {
|
||||
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
|
||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
|
||||
#### Level logging
|
||||
|
||||
@@ -266,10 +272,10 @@ init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(logrus.JSONFormatter)
|
||||
log.SetFormatter(&logrus.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(logrus.TextFormatter)
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -323,7 +329,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
@@ -188,6 +188,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
@@ -234,6 +235,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
@@ -1,6 +1,7 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -45,6 +46,15 @@ var largeFields = Fields{
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
var errorFields = Fields{
|
||||
"foo": fmt.Errorf("bar"),
|
||||
"baz": fmt.Errorf("qux"),
|
||||
}
|
||||
|
||||
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
@@ -3,7 +3,7 @@ package logrus
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
@@ -11,11 +11,11 @@ type Hook interface {
|
||||
}
|
||||
|
||||
// Internal type for storing the hooks on a logger instance.
|
||||
type levelHooks map[Level][]Hook
|
||||
type LevelHooks map[Level][]Hook
|
||||
|
||||
// Add a hook to an instance of logger. This is called with
|
||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
func (hooks levelHooks) Add(hook Hook) {
|
||||
func (hooks LevelHooks) Add(hook Hook) {
|
||||
for _, level := range hook.Levels() {
|
||||
hooks[level] = append(hooks[level], hook)
|
||||
}
|
||||
@@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) {
|
||||
|
||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
// appropriate hooks for a log entry.
|
||||
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
|
||||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
||||
for _, hook := range hooks[level] {
|
||||
if err := hook.Fire(entry); err != nil {
|
||||
return err
|
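The hooks.go hunk above exports the hook registry type (`levelHooks` becomes `LevelHooks`), so callers constructing a `Logger` by hand can now reference it. Registration through `Hooks.Add` is unchanged; a minimal sketch with a hypothetical hook type, again assuming the non-vendored logrus import path:

```go
package main

import "github.com/Sirupsen/logrus"

// stderrNotifier is a hypothetical hook that reacts to error-level entries.
type stderrNotifier struct{}

func (stderrNotifier) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func (stderrNotifier) Fire(entry *logrus.Entry) error {
	// A real hook would forward the entry to an external service here.
	return nil
}

func main() {
	logger := logrus.New() // New() now initializes Hooks with make(LevelHooks)
	logger.Hooks.Add(stderrNotifier{})
	logger.Error("something went wrong") // fires the hook, then logs as usual
}
```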
@@ -24,11 +24,12 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
}
|
||||
prefixFieldClashes(data)
|
||||
|
||||
if f.TimestampFormat == "" {
|
||||
f.TimestampFormat = DefaultTimestampFormat
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = DefaultTimestampFormat
|
||||
}
|
||||
|
||||
data["time"] = entry.Time.Format(f.TimestampFormat)
|
||||
data["time"] = entry.Time.Format(timestampFormat)
|
||||
data["msg"] = entry.Message
|
||||
data["level"] = entry.Level.String()
|
||||
|
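The JSONFormatter change above stops writing the default back into `f.TimestampFormat` and uses a per-call local instead, so a formatter configured with an empty format is no longer mutated on first use. Supplying an explicit format still works the same way; a short sketch under the same import-path assumption:

```go
package main

import (
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	// An explicit timestamp format is honored; an empty one now falls back to
	// DefaultTimestampFormat on each call instead of being written back to the struct.
	logrus.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC3339})
	logrus.Info("json output with an RFC 3339 time field")
}
```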
@@ -14,7 +14,7 @@ type Logger struct {
|
||||
// Hooks for the logger instance. These allow firing events based on logging
|
||||
// levels and log entries. For example, to send errors to an error tracking
|
||||
// service, log to StatsD or dump the core on fatal errors.
|
||||
Hooks levelHooks
|
||||
Hooks LevelHooks
|
||||
// All log entries pass through the formatter before logged to Out. The
|
||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||
@@ -37,7 +37,7 @@ type Logger struct {
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(levelHooks),
|
||||
// Hooks: make(LevelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
@@ -46,7 +46,7 @@ func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stderr,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(levelHooks),
|
||||
Hooks: make(LevelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
@@ -102,6 +102,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatalf(format, args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
@@ -148,6 +149,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatal(args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
@@ -194,6 +196,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.Level >= FatalLevel {
|
||||
NewEntry(logger).Fatalln(args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
@@ -8,7 +8,7 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
9
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// +build darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
package logrus
|
||||
|
||||
import "syscall"
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
type Termios syscall.Termios
|
@@ -3,6 +3,7 @@ package logrus
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -69,7 +70,8 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
|
||||
prefixFieldClashes(entry.Data)
|
||||
|
||||
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
|
||||
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
||||
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
||||
|
||||
if f.TimestampFormat == "" {
|
||||
f.TimestampFormat = DefaultTimestampFormat
|
||||
@@ -129,21 +131,28 @@ func needsQuoting(text string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
|
||||
switch value.(type) {
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
||||
|
||||
b.WriteString(key)
|
||||
b.WriteByte('=')
|
||||
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
if needsQuoting(value.(string)) {
|
||||
fmt.Fprintf(b, "%v=%s ", key, value)
|
||||
if needsQuoting(value) {
|
||||
b.WriteString(value)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%v=%q ", key, value)
|
||||
fmt.Fprintf(b, "%q", value)
|
||||
}
|
||||
case error:
|
||||
if needsQuoting(value.(error).Error()) {
|
||||
fmt.Fprintf(b, "%v=%s ", key, value)
|
||||
errmsg := value.Error()
|
||||
if needsQuoting(errmsg) {
|
||||
b.WriteString(errmsg)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%v=%q ", key, value)
|
||||
fmt.Fprintf(b, "%q", value)
|
||||
}
|
||||
default:
|
||||
fmt.Fprintf(b, "%v=%v ", key, value)
|
||||
fmt.Fprint(b, value)
|
||||
}
|
||||
|
||||
b.WriteByte(' ')
|
||||
}
|
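The TextFormatter hunk above rewrites `appendKeyValue` to take a string key, write `key=` once, and quote only values that need it; the rendered output remains `key=value` pairs. A small sketch of what that looks like from the caller's side (field names and values are made up):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	// With colors disabled, entries are rendered as key=value pairs; values
	// containing spaces or punctuation are quoted, plain words are not.
	logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
	logrus.WithFields(logrus.Fields{
		"plain":  "word",      // emitted as plain=word
		"quoted": "two words", // emitted as quoted="two words"
	}).Info("formatting example")
}
```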
62
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// EnvironmentVariableRegexp A regexp to validate correct environment variables
|
||||
// Environment variables set by the user must have a name consisting solely of
|
||||
// alphabetics, numerics, and underscores - the first of which must not be numeric.
|
||||
EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
|
||||
)
|
||||
|
||||
// ParseEnvFile Read in a line delimited file with environment variables enumerated
|
||||
func ParseEnvFile(filename string) ([]string, error) {
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
lines := []string{}
|
||||
scanner := bufio.NewScanner(fh)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
|
||||
if !EnvironmentVariableRegexp.MatchString(variable) {
|
||||
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)}
|
||||
}
|
||||
if len(data) > 1 {
|
||||
|
||||
// pass the value through, no trimming
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
||||
} else {
|
||||
// if only a pass-through variable is given, clean it up.
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
|
||||
}
|
||||
}
|
||||
}
|
||||
return lines, scanner.Err()
|
||||
}
|
||||
|
||||
var whiteSpaces = " \t"
|
||||
|
||||
// ErrBadEnvVariable typed error for bad environment variable
|
||||
type ErrBadEnvVariable struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadEnvVariable) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
}
|
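envfile.go is a newly vendored file: `ParseEnvFile` reads a line-delimited env file, skips blank lines and `#` comments, validates names against `EnvironmentVariableRegexp`, and resolves bare names from the current environment. A hedged usage sketch; the file name is hypothetical and the import path is the vendored location added by this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// "app.env" is a hypothetical file containing lines such as
	//   PORT=8080
	//   HOME        <- bare name, resolved from the current environment
	vars, err := opts.ParseEnvFile("app.env")
	if err != nil {
		log.Fatal(err) // returns ErrBadEnvVariable for malformed names
	}
	for _, kv := range vars {
		fmt.Println(kv) // each entry is already in KEY=value form
	}
}
```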
133
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func tmpFileWithContent(content string, t *testing.T) string {
|
||||
tmpFile, err := ioutil.TempFile("", "envfile-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer tmpFile.Close()
|
||||
|
||||
tmpFile.WriteString(content)
|
||||
return tmpFile.Name()
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a file with a few well formatted lines
|
||||
func TestParseEnvFileGoodFile(t *testing.T) {
|
||||
content := `foo=bar
|
||||
baz=quux
|
||||
# comment
|
||||
|
||||
_foobar=foobaz
|
||||
`
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
lines, err := ParseEnvFile(tmpFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedLines := []string{
|
||||
"foo=bar",
|
||||
"baz=quux",
|
||||
"_foobar=foobaz",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(lines, expectedLines) {
|
||||
t.Fatal("lines not equal to expected_lines")
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for an empty file
|
||||
func TestParseEnvFileEmptyFile(t *testing.T) {
|
||||
tmpFile := tmpFileWithContent("", t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
lines, err := ParseEnvFile(tmpFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(lines) != 0 {
|
||||
t.Fatal("lines not empty; expected empty")
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a non existent file
|
||||
func TestParseEnvFileNonExistentFile(t *testing.T) {
|
||||
_, err := ParseEnvFile("foo_bar_baz")
|
||||
if err == nil {
|
||||
t.Fatal("ParseEnvFile succeeded; expected failure")
|
||||
}
|
||||
if _, ok := err.(*os.PathError); !ok {
|
||||
t.Fatalf("Expected a PathError, got [%v]", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a badly formatted file
|
||||
func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
|
||||
content := `foo=bar
|
||||
f =quux
|
||||
`
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err)
|
||||
}
|
||||
expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable"
|
||||
if err.Error() != expectedMessage {
|
||||
t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
|
||||
func TestParseEnvFileLineTooLongFile(t *testing.T) {
|
||||
content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
|
||||
content = fmt.Sprint("foo=", content)
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
if err == nil {
|
||||
t.Fatal("ParseEnvFile succeeded; expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
// ParseEnvFile with a random file, pass through
|
||||
func TestParseEnvFileRandomFile(t *testing.T) {
|
||||
content := `first line
|
||||
another invalid line`
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err)
|
||||
}
|
||||
expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable"
|
||||
if err.Error() != expectedMessage {
|
||||
t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
|
||||
}
|
||||
}
|
7
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// +build !windows
|
||||
|
||||
package opts
|
||||
|
||||
import "fmt"
|
||||
|
||||
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
7
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// +build windows
|
||||
|
||||
package opts
|
||||
|
||||
import "fmt"
|
||||
|
||||
var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
35
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// IpOpt type that hold an IP
|
||||
type IpOpt struct {
|
||||
*net.IP
|
||||
}
|
||||
|
||||
func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
|
||||
o := &IpOpt{
|
||||
IP: ref,
|
||||
}
|
||||
o.Set(defaultVal)
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *IpOpt) Set(val string) error {
|
||||
ip := net.ParseIP(val)
|
||||
if ip == nil {
|
||||
return fmt.Errorf("%s is not an ip address", val)
|
||||
}
|
||||
*o.IP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *IpOpt) String() string {
|
||||
if *o.IP == nil {
|
||||
return ""
|
||||
}
|
||||
return o.IP.String()
|
||||
}
|
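`IpOpt` wraps a `*net.IP` and implements `Set`/`String`, so it satisfies `flag.Value`. A short sketch binding it to a command-line flag; the flag name is illustrative:

```go
package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	var bindIP net.IP
	// NewIpOpt stores the pointer and seeds it with the default value;
	// Set and String make *IpOpt usable as a flag.Value.
	ipOpt := opts.NewIpOpt(&bindIP, "0.0.0.0")
	flag.Var(ipOpt, "bind-ip", "IP address to bind to")
	flag.Parse()
	fmt.Println("binding to", ipOpt.String())
}
```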
54
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIpOptString(t *testing.T) {
|
||||
addresses := []string{"", "0.0.0.0"}
|
||||
var ip net.IP
|
||||
|
||||
for _, address := range addresses {
|
||||
stringAddress := NewIpOpt(&ip, address).String()
|
||||
if stringAddress != address {
|
||||
t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewIpOptInvalidDefaultVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
defaultVal := "Not an ip"
|
||||
|
||||
ipOpt := NewIpOpt(&ip, defaultVal)
|
||||
|
||||
expected := "127.0.0.1"
|
||||
if ipOpt.String() != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewIpOptValidDefaultVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
defaultVal := "192.168.1.1"
|
||||
|
||||
ipOpt := NewIpOpt(&ip, defaultVal)
|
||||
|
||||
expected := "192.168.1.1"
|
||||
if ipOpt.String() != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestIpOptSetInvalidVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
ipOpt := &IpOpt{IP: &ip}
|
||||
|
||||
invalidIp := "invalid ip"
|
||||
expectedError := "invalid ip is not an ip address"
|
||||
err := ipOpt.Set(invalidIp)
|
||||
if err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error())
|
||||
}
|
||||
}
|
323
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
|
||||
)
|
||||
|
||||
var (
|
||||
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
|
||||
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
|
||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
|
||||
DefaultHTTPHost = "127.0.0.1"
|
||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp://
|
||||
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
|
||||
// is not supplied. A better longer term solution would be to use a named
|
||||
// pipe as the default on the Windows daemon.
|
||||
DefaultHTTPPort = 2375 // Default HTTP Port
|
||||
// DefaultUnixSocket Path for the unix socket.
|
||||
// Docker daemon by default always listens on the default unix socket
|
||||
DefaultUnixSocket = "/var/run/docker.sock"
|
||||
)
|
||||
|
||||
// ListOpts type that hold a list of values and a validation function.
|
||||
type ListOpts struct {
|
||||
values *[]string
|
||||
validator ValidatorFctType
|
||||
}
|
||||
|
||||
// NewListOpts Create a new ListOpts with the specified validator.
|
||||
func NewListOpts(validator ValidatorFctType) ListOpts {
|
||||
var values []string
|
||||
return *NewListOptsRef(&values, validator)
|
||||
}
|
||||
|
||||
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
|
||||
return &ListOpts{
|
||||
values: values,
|
||||
validator: validator,
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *ListOpts) String() string {
|
||||
return fmt.Sprintf("%v", []string((*opts.values)))
|
||||
}
|
||||
|
||||
// Set validates if needed the input value and add it to the
|
||||
// internal slice.
|
||||
func (opts *ListOpts) Set(value string) error {
|
||||
if opts.validator != nil {
|
||||
v, err := opts.validator(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = v
|
||||
}
|
||||
(*opts.values) = append((*opts.values), value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete remove the given element from the slice.
|
||||
func (opts *ListOpts) Delete(key string) {
|
||||
for i, k := range *opts.values {
|
||||
if k == key {
|
||||
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetMap returns the content of values in a map in order to avoid
|
||||
// duplicates.
|
||||
// FIXME: can we remove this?
|
||||
func (opts *ListOpts) GetMap() map[string]struct{} {
|
||||
ret := make(map[string]struct{})
|
||||
for _, k := range *opts.values {
|
||||
ret[k] = struct{}{}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// GetAll returns the values' slice.
|
||||
// FIXME: Can we remove this?
|
||||
func (opts *ListOpts) GetAll() []string {
|
||||
return (*opts.values)
|
||||
}
|
||||
|
||||
// Get checks the existence of the given key.
|
||||
func (opts *ListOpts) Get(key string) bool {
|
||||
for _, k := range *opts.values {
|
||||
if k == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the amount of element in the slice.
|
||||
func (opts *ListOpts) Len() int {
|
||||
return len((*opts.values))
|
||||
}
|
||||
|
||||
//MapOpts type that holds a map of values and a validation function.
|
||||
type MapOpts struct {
|
||||
values map[string]string
|
||||
validator ValidatorFctType
|
||||
}
|
||||
|
||||
// Set validates if needed the input value and add it to the
|
||||
// internal map, by splitting on '='.
|
||||
func (opts *MapOpts) Set(value string) error {
|
||||
if opts.validator != nil {
|
||||
v, err := opts.validator(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = v
|
||||
}
|
||||
vals := strings.SplitN(value, "=", 2)
|
||||
if len(vals) == 1 {
|
||||
(opts.values)[vals[0]] = ""
|
||||
} else {
|
||||
(opts.values)[vals[0]] = vals[1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opts *MapOpts) String() string {
|
||||
return fmt.Sprintf("%v", map[string]string((opts.values)))
|
||||
}
|
||||
|
||||
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
||||
if values == nil {
|
||||
values = make(map[string]string)
|
||||
}
|
||||
return &MapOpts{
|
||||
values: values,
|
||||
validator: validator,
|
||||
}
|
||||
}
|
||||
|
||||
// ValidatorFctType validator that return a validate string and/or an error
|
||||
type ValidatorFctType func(val string) (string, error)
|
||||
|
||||
// ValidatorFctListType validator that return a validate list of string and/or an error
|
||||
type ValidatorFctListType func(val string) ([]string, error)
|
||||
|
||||
// ValidateAttach Validates that the specified string is a valid attach option.
|
||||
func ValidateAttach(val string) (string, error) {
|
||||
s := strings.ToLower(val)
|
||||
for _, str := range []string{"stdin", "stdout", "stderr"} {
|
||||
if s == str {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
|
||||
}
|
||||
|
||||
// ValidateLink Validates that the specified string has a valid link format (containerName:alias).
|
||||
func ValidateLink(val string) (string, error) {
|
||||
if _, _, err := parsers.ParseLink(val); err != nil {
|
||||
return val, err
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateDevice Validate a path for devices
|
||||
// It will make sure 'val' is in the form:
|
||||
// [host-dir:]container-path[:mode]
|
||||
func ValidateDevice(val string) (string, error) {
|
||||
return validatePath(val, false)
|
||||
}
|
||||
|
||||
// ValidatePath Validate a path for volumes
|
||||
// It will make sure 'val' is in the form:
|
||||
// [host-dir:]container-path[:rw|ro]
|
||||
// It will also validate the mount mode.
|
||||
func ValidatePath(val string) (string, error) {
|
||||
return validatePath(val, true)
|
||||
}
|
||||
|
||||
func validatePath(val string, validateMountMode bool) (string, error) {
|
||||
var containerPath string
|
||||
var mode string
|
||||
|
||||
if strings.Count(val, ":") > 2 {
|
||||
return val, fmt.Errorf("bad format for volumes: %s", val)
|
||||
}
|
||||
|
||||
splited := strings.SplitN(val, ":", 3)
|
||||
if splited[0] == "" {
|
||||
return val, fmt.Errorf("bad format for volumes: %s", val)
|
||||
}
|
||||
switch len(splited) {
|
||||
case 1:
|
||||
containerPath = splited[0]
|
||||
val = path.Clean(containerPath)
|
||||
case 2:
|
||||
if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid {
|
||||
containerPath = splited[0]
|
||||
mode = splited[1]
|
||||
val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
|
||||
} else {
|
||||
containerPath = splited[1]
|
||||
val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath))
|
||||
}
|
||||
case 3:
|
||||
containerPath = splited[1]
|
||||
mode = splited[2]
|
||||
if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid {
|
||||
return val, fmt.Errorf("bad mount mode specified : %s", mode)
|
||||
}
|
||||
val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode)
|
||||
}
|
||||
|
||||
if !path.IsAbs(containerPath) {
|
||||
return val, fmt.Errorf("%s is not an absolute path", containerPath)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateEnv Validate an environment variable and returns it
|
||||
// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid.
|
||||
// If no value is specified, it returns the current value using os.Getenv.
|
||||
func ValidateEnv(val string) (string, error) {
|
||||
arr := strings.Split(val, "=")
|
||||
if len(arr) > 1 {
|
||||
return val, nil
|
||||
}
|
||||
if !EnvironmentVariableRegexp.MatchString(arr[0]) {
|
||||
return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
|
||||
}
|
||||
if !doesEnvExist(val) {
|
||||
return val, nil
|
||||
}
|
||||
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
||||
}
|
||||
|
||||
// ValidateIPAddress Validates an Ip address
|
||||
func ValidateIPAddress(val string) (string, error) {
|
||||
var ip = net.ParseIP(strings.TrimSpace(val))
|
||||
if ip != nil {
|
||||
return ip.String(), nil
|
||||
}
|
||||
return "", fmt.Errorf("%s is not an ip address", val)
|
||||
}
|
||||
|
||||
// ValidateMACAddress Validates a MAC address
|
||||
func ValidateMACAddress(val string) (string, error) {
|
||||
_, err := net.ParseMAC(strings.TrimSpace(val))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateDNSSearch Validates domain for resolvconf search configuration.
|
||||
// A zero length domain is represented by .
|
||||
func ValidateDNSSearch(val string) (string, error) {
|
||||
if val = strings.Trim(val, " "); val == "." {
|
||||
return val, nil
|
||||
}
|
||||
return validateDomain(val)
|
||||
}
|
||||
|
||||
func validateDomain(val string) (string, error) {
|
||||
if alphaRegexp.FindString(val) == "" {
|
||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
||||
}
|
||||
ns := domainRegexp.FindSubmatch([]byte(val))
|
||||
if len(ns) > 0 && len(ns[1]) < 255 {
|
||||
return string(ns[1]), nil
|
||||
}
|
||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
||||
}
|
||||
|
||||
// ValidateExtraHost Validate that the given string is a valid extrahost and returns it
|
||||
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6)
|
||||
func ValidateExtraHost(val string) (string, error) {
|
||||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
||||
arr := strings.SplitN(val, ":", 2)
|
||||
if len(arr) != 2 || len(arr[0]) == 0 {
|
||||
return "", fmt.Errorf("bad format for add-host: %q", val)
|
||||
}
|
||||
if _, err := ValidateIPAddress(arr[1]); err != nil {
|
||||
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateLabel Validate that the given string is a valid label, and returns it
|
||||
// Labels are in the form on key=value
|
||||
func ValidateLabel(val string) (string, error) {
|
||||
if strings.Count(val, "=") < 1 {
|
||||
return "", fmt.Errorf("bad attribute format: %s", val)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateHost Validate that the given string is a valid host and returns it
|
||||
func ValidateHost(val string) (string, error) {
|
||||
host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
return host, nil
|
||||
}
|
||||
|
||||
func doesEnvExist(name string) bool {
|
||||
for _, entry := range os.Environ() {
|
||||
parts := strings.SplitN(entry, "=", 2)
|
||||
if parts[0] == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
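opts.go bundles the generic `ListOpts`/`MapOpts` flag helpers with the individual validators (`ValidateEnv`, `ValidateLink`, `ValidatePath`, and so on). A sketch of the typical pairing, collecting a repeatable `-e` flag through a validated list; the flag name mirrors Docker's CLI but is only illustrative here:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// Every value passed to -e is run through ValidateEnv before it is stored;
	// bare names such as "-e PATH" are expanded from the current environment.
	envVars := opts.NewListOpts(opts.ValidateEnv)
	flag.Var(&envVars, "e", "set an environment variable (repeatable)")
	flag.Parse()
	fmt.Println("collected:", envVars.GetAll())
}
```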
479
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go
generated
vendored
Normal file
@@ -0,0 +1,479 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValidateIPAddress(t *testing.T) {
|
||||
if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMapOpts(t *testing.T) {
|
||||
tmpMap := make(map[string]string)
|
||||
o := NewMapOpts(tmpMap, logOptsValidator)
|
||||
o.Set("max-size=1")
|
||||
if o.String() != "map[max-size:1]" {
|
||||
t.Errorf("%s != [map[max-size:1]", o.String())
|
||||
}
|
||||
|
||||
o.Set("max-file=2")
|
||||
if len(tmpMap) != 2 {
|
||||
t.Errorf("map length %d != 2", len(tmpMap))
|
||||
}
|
||||
|
||||
if tmpMap["max-file"] != "2" {
|
||||
t.Errorf("max-file = %s != 2", tmpMap["max-file"])
|
||||
}
|
||||
|
||||
if tmpMap["max-size"] != "1" {
|
||||
t.Errorf("max-size = %s != 1", tmpMap["max-size"])
|
||||
}
|
||||
if o.Set("dummy-val=3") == nil {
|
||||
t.Errorf("validator is not being called")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateMACAddress(t *testing.T) {
|
||||
if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil {
|
||||
t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err)
|
||||
}
|
||||
|
||||
if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil {
|
||||
t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC")
|
||||
}
|
||||
|
||||
if _, err := ValidateMACAddress(`random invalid string`); err == nil {
|
||||
t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC")
|
||||
}
|
||||
}
|
||||
|
||||
func TestListOptsWithoutValidator(t *testing.T) {
|
||||
o := NewListOpts(nil)
|
||||
o.Set("foo")
|
||||
if o.String() != "[foo]" {
|
||||
t.Errorf("%s != [foo]", o.String())
|
||||
}
|
||||
o.Set("bar")
|
||||
if o.Len() != 2 {
|
||||
t.Errorf("%d != 2", o.Len())
|
||||
}
|
||||
o.Set("bar")
|
||||
if o.Len() != 3 {
|
||||
t.Errorf("%d != 3", o.Len())
|
||||
}
|
||||
if !o.Get("bar") {
|
||||
t.Error("o.Get(\"bar\") == false")
|
||||
}
|
||||
if o.Get("baz") {
|
||||
t.Error("o.Get(\"baz\") == true")
|
||||
}
|
||||
o.Delete("foo")
|
||||
if o.String() != "[bar bar]" {
|
||||
t.Errorf("%s != [bar bar]", o.String())
|
||||
}
|
||||
listOpts := o.GetAll()
|
||||
if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" {
|
||||
t.Errorf("Expected [[bar bar]], got [%v]", listOpts)
|
||||
}
|
||||
mapListOpts := o.GetMap()
|
||||
if len(mapListOpts) != 1 {
|
||||
t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestListOptsWithValidator(t *testing.T) {
|
||||
// Re-using logOptsValidator (used by MapOpts)
|
||||
o := NewListOpts(logOptsValidator)
|
||||
o.Set("foo")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
o.Set("foo=bar")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
o.Set("max-file=2")
|
||||
if o.Len() != 1 {
|
||||
t.Errorf("%d != 1", o.Len())
|
||||
}
|
||||
if !o.Get("max-file=2") {
|
||||
t.Error("o.Get(\"max-file=2\") == false")
|
||||
}
|
||||
if o.Get("baz") {
|
||||
t.Error("o.Get(\"baz\") == true")
|
||||
}
|
||||
o.Delete("max-file=2")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDNSSearch(t *testing.T) {
|
||||
valid := []string{
|
||||
`.`,
|
||||
`a`,
|
||||
`a.`,
|
||||
`1.foo`,
|
||||
`17.foo`,
|
||||
`foo.bar`,
|
||||
`foo.bar.baz`,
|
||||
`foo.bar.`,
|
||||
`foo.bar.baz`,
|
||||
`foo1.bar2`,
|
||||
`foo1.bar2.baz`,
|
||||
`1foo.2bar.`,
|
||||
`1foo.2bar.baz`,
|
||||
`foo-1.bar-2`,
|
||||
`foo-1.bar-2.baz`,
|
||||
`foo-1.bar-2.`,
|
||||
`foo-1.bar-2.baz`,
|
||||
`1-foo.2-bar`,
|
||||
`1-foo.2-bar.baz`,
|
||||
`1-foo.2-bar.`,
|
||||
`1-foo.2-bar.baz`,
|
||||
}
|
||||
|
||||
invalid := []string{
|
||||
``,
|
||||
` `,
|
||||
` `,
|
||||
`17`,
|
||||
`17.`,
|
||||
`.17`,
|
||||
`17-.`,
|
||||
`17-.foo`,
|
||||
`.foo`,
|
||||
`foo-.bar`,
|
||||
`-foo.bar`,
|
||||
`foo.bar-`,
|
||||
`foo.bar-.baz`,
|
||||
`foo.-bar`,
|
||||
`foo.-bar.baz`,
|
||||
`foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`,
|
||||
}
|
||||
|
||||
for _, domain := range valid {
|
||||
if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, domain := range invalid {
|
||||
if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateExtraHosts(t *testing.T) {
|
||||
valid := []string{
|
||||
`myhost:192.168.0.1`,
|
||||
`thathost:10.0.2.1`,
|
||||
`anipv6host:2003:ab34:e::1`,
|
||||
`ipv6local:::1`,
|
||||
}
|
||||
|
||||
invalid := map[string]string{
|
||||
`myhost:192.notanipaddress.1`: `invalid IP`,
|
||||
`thathost-nosemicolon10.0.0.1`: `bad format`,
|
||||
`anipv6host:::::1`: `invalid IP`,
|
||||
`ipv6local:::0::`: `invalid IP`,
|
||||
}
|
||||
|
||||
for _, extrahost := range valid {
|
||||
if _, err := ValidateExtraHost(extrahost); err != nil {
|
||||
t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for extraHost, expectedError := range invalid {
|
||||
if _, err := ValidateExtraHost(extraHost); err == nil {
|
||||
t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost)
|
||||
} else {
|
||||
if !strings.Contains(err.Error(), expectedError) {
|
||||
t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAttach(t *testing.T) {
|
||||
valid := []string{
|
||||
"stdin",
|
||||
"stdout",
|
||||
"stderr",
|
||||
"STDIN",
|
||||
"STDOUT",
|
||||
"STDERR",
|
||||
}
|
||||
if _, err := ValidateAttach("invalid"); err == nil {
|
||||
t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
|
||||
}
|
||||
|
||||
for _, attach := range valid {
|
||||
value, err := ValidateAttach(attach)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if value != strings.ToLower(attach) {
|
||||
t.Fatalf("Expected [%v], got [%v]", attach, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLink(t *testing.T) {
|
||||
valid := []string{
|
||||
"name",
|
||||
"dcdfbe62ecd0:alias",
|
||||
"7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da",
|
||||
"angry_torvalds:linus",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "empty string specified for links",
|
||||
"too:much:of:it": "bad format for links: too:much:of:it",
|
||||
}
|
||||
|
||||
for _, link := range valid {
|
||||
if _, err := ValidateLink(link); err != nil {
|
||||
t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err)
|
||||
}
|
||||
}
|
||||
|
||||
for link, expectedError := range invalid {
|
||||
if _, err := ValidateLink(link); err == nil {
|
||||
t.Fatalf("ValidateLink(`%q`) should have failed validation", link)
|
||||
} else {
|
||||
if !strings.Contains(err.Error(), expectedError) {
|
||||
t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePath(t *testing.T) {
|
||||
valid := []string{
|
||||
"/home",
|
||||
"/home:/home",
|
||||
"/home:/something/else",
|
||||
"/with space",
|
||||
"/home:/with space",
|
||||
"relative:/absolute-path",
|
||||
"hostPath:/containerPath:ro",
|
||||
"/hostPath:/containerPath:rw",
|
||||
"/rw:/ro",
|
||||
"/path:rw",
|
||||
"/path:ro",
|
||||
"/rw:rw",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "bad format for volumes: ",
|
||||
"./": "./ is not an absolute path",
|
||||
"../": "../ is not an absolute path",
|
||||
"/:../": "../ is not an absolute path",
|
||||
"/:path": "path is not an absolute path",
|
||||
":": "bad format for volumes: :",
|
||||
"/tmp:": " is not an absolute path",
|
||||
":test": "bad format for volumes: :test",
|
||||
":/test": "bad format for volumes: :/test",
|
||||
"tmp:": " is not an absolute path",
|
||||
":test:": "bad format for volumes: :test:",
|
||||
"::": "bad format for volumes: ::",
|
||||
":::": "bad format for volumes: :::",
|
||||
"/tmp:::": "bad format for volumes: /tmp:::",
|
||||
":/tmp::": "bad format for volumes: :/tmp::",
|
||||
"path:ro": "path is not an absolute path",
|
||||
"/path:/path:sw": "bad mount mode specified : sw",
|
||||
"/path:/path:rwz": "bad mount mode specified : rwz",
|
||||
}
|
||||
|
||||
for _, path := range valid {
|
||||
if _, err := ValidatePath(path); err != nil {
|
||||
t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
for path, expectedError := range invalid {
|
||||
if _, err := ValidatePath(path); err == nil {
|
||||
t.Fatalf("ValidatePath(`%q`) should have failed validation", path)
|
||||
} else {
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestValidateDevice(t *testing.T) {
|
||||
valid := []string{
|
||||
"/home",
|
||||
"/home:/home",
|
||||
"/home:/something/else",
|
||||
"/with space",
|
||||
"/home:/with space",
|
||||
"relative:/absolute-path",
|
||||
"hostPath:/containerPath:ro",
|
||||
"/hostPath:/containerPath:rw",
|
||||
"/hostPath:/containerPath:mrw",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "bad format for volumes: ",
|
||||
"./": "./ is not an absolute path",
|
||||
"../": "../ is not an absolute path",
|
||||
"/:../": "../ is not an absolute path",
|
||||
"/:path": "path is not an absolute path",
|
||||
":": "bad format for volumes: :",
|
||||
"/tmp:": " is not an absolute path",
|
||||
":test": "bad format for volumes: :test",
|
||||
":/test": "bad format for volumes: :/test",
|
||||
"tmp:": " is not an absolute path",
|
||||
":test:": "bad format for volumes: :test:",
|
||||
"::": "bad format for volumes: ::",
|
||||
":::": "bad format for volumes: :::",
|
||||
"/tmp:::": "bad format for volumes: /tmp:::",
|
||||
":/tmp::": "bad format for volumes: :/tmp::",
|
||||
"path:ro": "ro is not an absolute path",
|
||||
}
|
||||
|
||||
for _, path := range valid {
|
||||
if _, err := ValidateDevice(path); err != nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
for path, expectedError := range invalid {
|
||||
if _, err := ValidateDevice(path); err == nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should have failed validation", path)
|
||||
} else {
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateEnv(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable",
|
||||
"asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable",
|
||||
"1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable",
|
||||
"123": "poorly formatted environment: variable '123' is not a valid environment variable",
|
||||
}
|
||||
valids := map[string]string{
|
||||
"a": "a",
|
||||
"something": "something",
|
||||
"_=a": "_=a",
|
||||
"env1=value1": "env1=value1",
|
||||
"_env1=value1": "_env1=value1",
|
||||
"env2=value2=value3": "env2=value2=value3",
|
||||
"env3=abc!qwe": "env3=abc!qwe",
|
||||
"env_4=value 4": "env_4=value 4",
|
||||
"PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
|
||||
"PATH=something": "PATH=something",
|
||||
}
|
||||
for value, expectedError := range invalids {
|
||||
_, err := ValidateEnv(value)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err)
|
||||
}
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
for value, expected := range valids {
|
||||
actual, err := ValidateEnv(value)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if actual != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLabel(t *testing.T) {
|
||||
if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" {
|
||||
t.Fatalf("Expected an error [bad attribute format: label], go %v", err)
|
||||
}
|
||||
if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" {
|
||||
t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err)
|
||||
}
|
||||
// Validate it's working with more than one =
|
||||
if actual, err := ValidateLabel("key1=value1=value2"); err != nil {
|
||||
t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err)
|
||||
}
|
||||
// Validate it's working with one more
|
||||
if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil {
|
||||
t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHost(t *testing.T) {
|
||||
invalid := map[string]string{
|
||||
"anything": "Invalid bind address format: anything",
|
||||
"something with spaces": "Invalid bind address format: something with spaces",
|
||||
"://": "Invalid bind address format: ://",
|
||||
"unknown://": "Invalid bind address format: unknown://",
|
||||
"tcp://": "Invalid proto, expected tcp: ",
|
||||
"tcp://:port": "Invalid bind address format: :port",
|
||||
"tcp://invalid": "Invalid bind address format: invalid",
|
||||
"tcp://invalid:port": "Invalid bind address format: invalid:port",
|
||||
}
|
||||
valid := map[string]string{
|
||||
"fd://": "fd://",
|
||||
"fd://something": "fd://something",
|
||||
"tcp://:2375": "tcp://127.0.0.1:2375", // default ip address
|
||||
"tcp://:2376": "tcp://127.0.0.1:2376", // default ip address
|
||||
"tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080",
|
||||
"tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000",
|
||||
"tcp://192.168:8080": "tcp://192.168:8080",
|
||||
"tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P
|
||||
"tcp://docker.com:2375": "tcp://docker.com:2375",
|
||||
"unix://": "unix:///var/run/docker.sock", // default unix:// value
|
||||
"unix://path/to/socket": "unix://path/to/socket",
|
||||
}
|
||||
|
||||
for value, errorMessage := range invalid {
|
||||
if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage {
|
||||
t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err)
|
||||
}
|
||||
}
|
||||
for value, expected := range valid {
|
||||
if actual, err := ValidateHost(value); err != nil || actual != expected {
|
||||
t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logOptsValidator(val string) (string, error) {
|
||||
allowedKeys := map[string]string{"max-size": "1", "max-file": "2"}
|
||||
vals := strings.Split(val, "=")
|
||||
if allowedKeys[vals[0]] != "" {
|
||||
return val, nil
|
||||
}
|
||||
return "", fmt.Errorf("invalid key %s", vals[0])
|
||||
}
|
47
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
|
||||
)
|
||||
|
||||
type UlimitOpt struct {
|
||||
values *map[string]*ulimit.Ulimit
|
||||
}
|
||||
|
||||
func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
|
||||
if ref == nil {
|
||||
ref = &map[string]*ulimit.Ulimit{}
|
||||
}
|
||||
return &UlimitOpt{ref}
|
||||
}
|
||||
|
||||
func (o *UlimitOpt) Set(val string) error {
|
||||
l, err := ulimit.Parse(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
(*o.values)[l.Name] = l
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *UlimitOpt) String() string {
|
||||
var out []string
|
||||
for _, v := range *o.values {
|
||||
out = append(out, v.String())
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v", out)
|
||||
}
|
||||
|
||||
func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
|
||||
var ulimits []*ulimit.Ulimit
|
||||
for _, v := range *o.values {
|
||||
ulimits = append(ulimits, v)
|
||||
}
|
||||
|
||||
return ulimits
|
||||
}
|
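ulimit.go adds `UlimitOpt`, which parses `name=soft:hard` strings via `ulimit.Parse` and accumulates them by name. A minimal sketch; the values are arbitrary and the import paths are the vendored copies added by this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
)

func main() {
	limits := map[string]*ulimit.Ulimit{}
	ulimitOpt := opts.NewUlimitOpt(&limits)

	// Set parses "name=soft:hard" via ulimit.Parse and stores the result
	// under its name, replacing any earlier entry with the same name.
	if err := ulimitOpt.Set("nofile=512:1024"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ulimitOpt) // prints [nofile=512:1024]
}
```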
42
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package opts
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
|
||||
)
|
||||
|
||||
func TestUlimitOpt(t *testing.T) {
|
||||
ulimitMap := map[string]*ulimit.Ulimit{
|
||||
"nofile": {"nofile", 1024, 512},
|
||||
}
|
||||
|
||||
ulimitOpt := NewUlimitOpt(&ulimitMap)
|
||||
|
||||
expected := "[nofile=512:1024]"
|
||||
if ulimitOpt.String() != expected {
|
||||
t.Fatalf("Expected %v, got %v", expected, ulimitOpt)
|
||||
}
|
||||
|
||||
// Valid ulimit append to opts
|
||||
if err := ulimitOpt.Set("core=1024:1024"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Invalid ulimit type returns an error and do not append to opts
|
||||
if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil {
|
||||
t.Fatalf("Expected error on invalid ulimit type")
|
||||
}
|
||||
expected = "[nofile=512:1024 core=1024:1024]"
|
||||
expected2 := "[core=1024:1024 nofile=512:1024]"
|
||||
result := ulimitOpt.String()
|
||||
if result != expected && result != expected2 {
|
||||
t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt)
|
||||
}
|
||||
|
||||
// And test GetList
|
||||
ulimits := ulimitOpt.GetList()
|
||||
if len(ulimits) != 2 {
|
||||
t.Fatalf("Expected a ulimit list of 2, got %v", ulimits)
|
||||
}
|
||||
}
|
@@ -12,28 +12,36 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/promise"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
type (
|
||||
Archive io.ReadCloser
|
||||
ArchiveReader io.Reader
|
||||
Compression int
|
||||
TarOptions struct {
|
||||
IncludeFiles []string
|
||||
ExcludePatterns []string
|
||||
Compression Compression
|
||||
NoLchown bool
|
||||
Name string
|
||||
Archive io.ReadCloser
|
||||
ArchiveReader io.Reader
|
||||
Compression int
|
||||
TarChownOptions struct {
|
||||
UID, GID int
|
||||
}
|
||||
TarOptions struct {
|
||||
IncludeFiles []string
|
||||
ExcludePatterns []string
|
||||
Compression Compression
|
||||
NoLchown bool
|
||||
ChownOpts *TarChownOptions
|
||||
Name string
|
||||
IncludeSourceDir bool
|
||||
// When unpacking, specifies whether overwriting a directory with a
|
||||
// non-directory is allowed and vice versa.
|
||||
NoOverwriteDirNonDir bool
|
||||
}
|
||||
|
||||
// Archiver allows the reuse of most utility functions of this package
|
||||
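The archive.go changes add a `TarChownOptions` type and a `ChownOpts` field to `TarOptions` (above) and, in the hunks that follow, thread it through `createTarFile` so extraction can force a fixed UID/GID instead of honoring each tar header (and skips `Lchown` entirely on Windows). A hedged sketch of the updated call path; the paths are placeholders and the import points at the vendored copy:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// Tar up a source directory (placeholder path)...
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// ...then unpack it, forcing ownership to root:root through the new
	// ChownOpts field rather than the UID/GID recorded in each tar header.
	unpackOpts := &archive.TarOptions{
		ChownOpts: &archive.TarChownOptions{UID: 0, GID: 0},
	}
	if err := archive.Untar(rc, "/tmp/dst", unpackOpts); err != nil {
		log.Fatal(err)
	}
}
```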
@@ -262,7 +270,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
|
||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
|
||||
// hdr.Mode is in linux format, which we can use for sycalls,
|
||||
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
||||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||
@@ -291,17 +299,8 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
file.Close()
|
||||
|
||||
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
|
||||
mode := uint32(hdr.Mode & 07777)
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeBlock:
|
||||
mode |= syscall.S_IFBLK
|
||||
case tar.TypeChar:
|
||||
mode |= syscall.S_IFCHR
|
||||
case tar.TypeFifo:
|
||||
mode |= syscall.S_IFIFO
|
||||
}
|
||||
|
||||
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
||||
// Handle this in an OS-specific way
|
||||
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -337,8 +336,14 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
|
||||
}
|
||||
|
||||
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
|
||||
return err
|
||||
// Lchown is not supported on Windows.
|
||||
if Lchown && runtime.GOOS != "windows" {
|
||||
if chownOpts == nil {
|
||||
chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
|
||||
}
|
||||
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for key, value := range hdr.Xattrs {
|
||||
@@ -349,20 +354,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
|
||||
// There is no LChmod, so ignore mode for symlink. Also, this
|
||||
// must happen after chown, as that can modify the file mode
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if hdr.Typeflag != tar.TypeSymlink {
|
||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := handleLChmod(hdr, path, hdrInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
|
||||
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
||||
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
|
||||
@@ -410,6 +407,20 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
SeenFiles: make(map[uint64]string),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Make sure to check the error on Close.
|
||||
if err := ta.TarWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close tar writer: %s", err)
|
||||
}
|
||||
if err := compressWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close compress writer: %s", err)
|
||||
}
|
||||
if err := pipeWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close pipe writer: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// this buffer is needed for the duration of this piped stream
|
||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||
|
||||
@@ -418,7 +429,26 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
// mutating the filesystem and we can see transient errors
|
||||
// from this
|
||||
|
||||
if options.IncludeFiles == nil {
|
||||
stat, err := os.Lstat(srcPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
// We can't later join a non-dir with any includes because the
|
||||
// 'walk' will error if "file/." is stat-ed and "file" is not a
|
||||
// directory. So, we must split the source path and use the
|
||||
// basename as the include.
|
||||
if len(options.IncludeFiles) > 0 {
|
||||
logrus.Warn("Tar: Can't archive a file with includes")
|
||||
}
|
||||
|
||||
dir, base := SplitPathDirEntry(srcPath)
|
||||
srcPath = dir
|
||||
options.IncludeFiles = []string{base}
|
||||
}
|
||||
|
||||
if len(options.IncludeFiles) == 0 {
|
||||
options.IncludeFiles = []string{"."}
|
||||
}
|
||||
|
||||
@@ -426,19 +456,26 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
|
||||
var renamedRelFilePath string // For when tar.Options.Name is set
|
||||
for _, include := range options.IncludeFiles {
|
||||
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
|
||||
// We can't use filepath.Join(srcPath, include) because this will
|
||||
// clean away a trailing "." or "/" which may be important.
|
||||
walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
|
||||
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
relFilePath, err := filepath.Rel(srcPath, filePath)
|
||||
if err != nil || (relFilePath == "." && f.IsDir()) {
|
||||
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
|
||||
// Error getting relative path OR we are looking
|
||||
// at the root path. Skip in both situations.
|
||||
// at the source directory path. Skip in both situations.
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.IncludeSourceDir && include == "." && relFilePath != "." {
|
||||
relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
|
||||
}
|
||||
|
||||
skip := false
|
||||
|
||||
// If "include" is an exact match for the current file
|
||||
@@ -449,7 +486,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
if include != relFilePath {
|
||||
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error matching %s", relFilePath, err)
|
||||
logrus.Debugf("Error matching %s: %v", relFilePath, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -466,6 +503,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
}
|
||||
seen[relFilePath] = true
|
||||
|
||||
// TODO Windows: Verify if this needs to be os.Pathseparator
|
||||
// Rename the base resource
|
||||
if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
|
||||
renamedRelFilePath = relFilePath
|
||||
@@ -481,17 +519,6 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Make sure to check the error on Close.
|
||||
if err := ta.TarWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close tar writer: %s", err)
|
||||
}
|
||||
if err := compressWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close compress writer: %s", err)
|
||||
}
|
||||
if err := pipeWriter.Close(); err != nil {
|
||||
logrus.Debugf("Can't close pipe writer: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return pipeReader, nil
|
||||
@@ -517,7 +544,8 @@ loop:
|
||||
}
|
||||
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
// This keeps "../" as-is, but normalizes "/../" to "/"
|
||||
// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
|
||||
// This keeps "..\" as-is, but normalizes "\..\" to "\".
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
for _, exclude := range options.ExcludePatterns {
|
||||
@@ -526,12 +554,15 @@ loop:
|
||||
}
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(hdr.Name, "/") {
|
||||
// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
|
||||
// the filepath format for the OS on which the daemon is running. Hence
|
||||
// the check for a slash-suffix MUST be done in an OS-agnostic way.
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
// Not the root directory, ensure that the parent directory exists
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = os.MkdirAll(parentPath, 0777)
|
||||
err = system.MkdirAll(parentPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -543,7 +574,7 @@ loop:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.HasPrefix(rel, "../") {
|
||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
|
||||
}
|
||||
|
||||
@@ -552,9 +583,22 @@ loop:
|
||||
// the layer is also a directory. Then we want to merge them (i.e.
|
||||
// just apply the metadata from the layer).
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
|
||||
// If NoOverwriteDirNonDir is true then we cannot replace
|
||||
// an existing directory with a non-directory from the archive.
|
||||
return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
|
||||
}
|
||||
|
||||
if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
|
||||
// If NoOverwriteDirNonDir is true then we cannot replace
|
||||
// an existing non-directory with a directory from the archive.
|
||||
return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
|
||||
}
|
||||
|
||||
if fi.IsDir() && hdr.Name == "." {
|
||||
continue
|
||||
}
|
||||
|
||||
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return err
|
||||
@@ -562,7 +606,8 @@ loop:
|
||||
}
|
||||
}
|
||||
trBuf.Reset(tr)
|
||||
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
|
||||
|
||||
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -588,8 +633,20 @@ loop:
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
if archive == nil {
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
dest = filepath.Clean(dest)
@@ -599,12 +656,18 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
return err

var r io.Reader = tarArchive
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
r = decompressedArchive
}
defer decompressedArchive.Close()
return Unpack(decompressedArchive, dest, options)

return Unpack(r, dest, options)
}
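A hedged sketch of the two entry points introduced above: Untar sniffs the compression via DecompressStream, while UntarUncompressed expects a plain tar stream; the paths here are placeholders:

// Sketch only; mirrors untarHandler's decompress flag.
package main

import (
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz") // gzip, bzip2, xz or plain tar
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Untar detects the compression before unpacking into dest.
	if err := archive.Untar(f, "/tmp/dest", &archive.TarOptions{NoLchown: true}); err != nil {
		log.Fatal(err)
	}

	// For a stream that is already a plain tar archive, skip detection:
	// err = archive.UntarUncompressed(plainTar, "/tmp/dest", &archive.TarOptions{})
}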
func (archiver *Archiver) TarUntar(src, dst string) error {
|
||||
@@ -651,7 +714,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
||||
}
|
||||
// Create dst, copy src's content into it
|
||||
logrus.Debugf("Creating dest directory: %s", dst)
|
||||
if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
|
||||
if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
||||
@@ -672,15 +735,18 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if srcSt.IsDir() {
|
||||
return fmt.Errorf("Can't copy a directory")
|
||||
}
|
||||
// Clean up the trailing /
|
||||
if dst[len(dst)-1] == '/' {
|
||||
dst = path.Join(dst, filepath.Base(src))
|
||||
|
||||
// Clean up the trailing slash. This must be done in an operating
|
||||
// system specific manner.
|
||||
if dst[len(dst)-1] == os.PathSeparator {
|
||||
dst = filepath.Join(dst, filepath.Base(src))
|
||||
}
|
||||
// Create the holding directory if necessary
|
||||
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
|
||||
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -723,8 +789,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
// Destination handling is done in an operating-system-specific manner,
// depending on where the daemon is running. If `dst` ends with a trailing
// slash the final destination path will be `dst/base(src)` (Linux) or
// `dst\base(src)` (Windows).
func CopyFileWithTar(src, dst string) (err error) {
return defaultArchiver.CopyFileWithTar(src, dst)
}
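A short sketch of the package-level helper documented above; the paths are illustrative, and the trailing separator on dst follows the OS-specific rule from the comment:

// Sketch only: copy one file, preserving its metadata, into a directory.
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// With a trailing separator, the final path becomes /tmp/backup/app.conf on Linux.
	if err := archive.CopyFileWithTar("/etc/app.conf", "/tmp/backup/"); err != nil {
		log.Fatal(err)
	}
}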
@@ -15,7 +15,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func TestIsArchiveNilHeader(t *testing.T) {
|
||||
@@ -719,7 +719,7 @@ func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
|
||||
err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -873,7 +873,8 @@ func getNlink(path string) (uint64, error) {
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys())
|
||||
}
|
||||
return statT.Nlink, nil
|
||||
// We need this conversion on ARM64
|
||||
return uint64(statT.Nlink), nil
|
||||
}
|
||||
|
||||
func getInode(path string) (uint64, error) {
|
@@ -7,9 +7,11 @@ import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// canonicalTarNameForPath returns platform-specific filepath
|
||||
// CanonicalTarNameForPath returns platform-specific filepath
|
||||
// to canonical posix-style path for tar archival. p is relative
|
||||
// path.
|
||||
func CanonicalTarNameForPath(p string) (string, error) {
|
||||
@@ -51,3 +53,37 @@ func major(device uint64) uint64 {
|
||||
func minor(device uint64) uint64 {
|
||||
return (device & 0xff) | ((device >> 12) & 0xfff00)
|
||||
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||
mode := uint32(hdr.Mode & 07777)
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeBlock:
|
||||
mode |= syscall.S_IFBLK
|
||||
case tar.TypeChar:
|
||||
mode |= syscall.S_IFCHR
|
||||
case tar.TypeFifo:
|
||||
mode |= syscall.S_IFIFO
|
||||
}
|
||||
|
||||
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if hdr.Typeflag != tar.TypeSymlink {
|
||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -14,11 +14,11 @@ import (
|
||||
// path.
|
||||
func CanonicalTarNameForPath(p string) (string, error) {
|
||||
// windows: convert windows style relative path with backslashes
|
||||
// into forward slashes. since windows does not allow '/' or '\'
|
||||
// into forward slashes. Since windows does not allow '/' or '\'
|
||||
// in file names, it is mostly safe to replace however we must
|
||||
// check just in case
|
||||
if strings.Contains(p, "/") {
|
||||
return "", fmt.Errorf("windows path contains forward slash: %s", p)
|
||||
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
|
||||
}
|
||||
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
|
||||
|
||||
@@ -38,3 +38,13 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
|
||||
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
||||
return
|
||||
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||
return nil
|
||||
}
|
@@ -5,6 +5,7 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -12,9 +13,9 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
type ChangeType int
|
||||
@@ -68,7 +69,11 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||
// Changes walks the path rw and determines changes for the files in the path,
|
||||
// with respect to the parent layers
|
||||
func Changes(layers []string, rw string) ([]Change, error) {
|
||||
var changes []Change
|
||||
var (
|
||||
changes []Change
|
||||
changedDirs = make(map[string]struct{})
|
||||
)
|
||||
|
||||
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -79,15 +84,17 @@ func Changes(layers []string, rw string) ([]Change, error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path = filepath.Join("/", path)
|
||||
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == "/" {
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip AUFS metadata
|
||||
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
|
||||
if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -129,6 +136,21 @@ func Changes(layers []string, rw string) ([]Change, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||
// This block is here to ensure the change is recorded even if the
|
||||
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||
if f.IsDir() {
|
||||
changedDirs[path] = struct{}{}
|
||||
}
|
||||
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
|
||||
parent := filepath.Dir(path)
|
||||
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
|
||||
changedDirs[parent] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Record change
|
||||
changes = append(changes, change)
|
||||
return nil
|
||||
@@ -149,12 +171,13 @@ type FileInfo struct {
|
||||
}
|
||||
|
||||
func (root *FileInfo) LookUp(path string) *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
parent := root
|
||||
if path == "/" {
|
||||
if path == string(os.PathSeparator) {
|
||||
return root
|
||||
}
|
||||
|
||||
pathElements := strings.Split(path, "/")
|
||||
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||
for _, elem := range pathElements {
|
||||
if elem != "" {
|
||||
child := parent.children[elem]
|
||||
@@ -169,15 +192,12 @@ func (root *FileInfo) LookUp(path string) *FileInfo {
|
||||
|
||||
func (info *FileInfo) path() string {
|
||||
if info.parent == nil {
|
||||
return "/"
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
return string(os.PathSeparator)
|
||||
}
|
||||
return filepath.Join(info.parent.path(), info.name)
|
||||
}
|
||||
|
||||
func (info *FileInfo) isDir() bool {
|
||||
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
|
||||
}
|
||||
|
||||
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
|
||||
sizeAtEntry := len(*changes)
|
||||
@@ -214,13 +234,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
// be visible when actually comparing the stat fields. The only time this
|
||||
// breaks down is if some code intentionally hides a change by setting
|
||||
// back mtime
|
||||
if oldStat.Mode() != newStat.Mode() ||
|
||||
oldStat.Uid() != newStat.Uid() ||
|
||||
oldStat.Gid() != newStat.Gid() ||
|
||||
oldStat.Rdev() != newStat.Rdev() ||
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
|
||||
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) ||
|
||||
if statDifferent(oldStat, newStat) ||
|
||||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
|
||||
change := Change{
|
||||
Path: newChild.path(),
|
||||
@@ -247,7 +261,8 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
|
||||
// If there were changes inside this directory, we need to add it, even if the directory
|
||||
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
|
||||
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
|
||||
change := Change{
|
||||
Path: info.path(),
|
||||
Kind: ChangeModify,
|
||||
@@ -269,85 +284,31 @@ func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
||||
}
|
||||
|
||||
func newRootFileInfo() *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
root := &FileInfo{
|
||||
name: "/",
|
||||
name: string(os.PathSeparator),
|
||||
children: make(map[string]*FileInfo),
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
func collectFileInfo(sourceDir string) (*FileInfo, error) {
|
||||
root := newRootFileInfo()
|
||||
|
||||
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
relPath, err := filepath.Rel(sourceDir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relPath = filepath.Join("/", relPath)
|
||||
|
||||
if relPath == "/" {
|
||||
return nil
|
||||
}
|
||||
|
||||
parent := root.LookUp(filepath.Dir(relPath))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
|
||||
}
|
||||
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(relPath),
|
||||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
}
|
||||
|
||||
s, err := system.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info.stat = s
|
||||
|
||||
info.capability, _ = system.Lgetxattr(path, "security.capability")
|
||||
|
||||
parent.children[info.name] = info
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
|
||||
// If oldDir is "", then all files in newDir will be Add-Changes.
|
||||
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
||||
var (
|
||||
oldRoot, newRoot *FileInfo
|
||||
err1, err2 error
|
||||
errs = make(chan error, 2)
|
||||
)
|
||||
go func() {
|
||||
if oldDir != "" {
|
||||
oldRoot, err1 = collectFileInfo(oldDir)
|
||||
}
|
||||
errs <- err1
|
||||
}()
|
||||
go func() {
|
||||
newRoot, err2 = collectFileInfo(newDir)
|
||||
errs <- err2
|
||||
}()
|
||||
|
||||
// block until both routines have returned
|
||||
for i := 0; i < 2; i++ {
|
||||
if err := <-errs; err != nil {
|
||||
if oldDir == "" {
|
||||
emptyDir, err := ioutil.TempDir("", "empty")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(emptyDir)
|
||||
oldDir = emptyDir
|
||||
}
|
||||
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newRoot.Changes(oldRoot), nil
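A brief sketch of how the rewritten ChangesDirs is typically driven; the layer directories are placeholders:

// Sketch only: diff two unpacked layer directories.
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	changes, err := archive.ChangesDirs("/var/lib/demo/new", "/var/lib/demo/old")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Printf("%v %s\n", c.Kind, c.Path) // Kind is ChangeModify, ChangeAdd or ChangeDelete
	}
}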
|
285
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||
// method in general returns the entire contents of two directory trees, we
|
||||
// optimize some FS calls out on linux. In particular, we take advantage of the
|
||||
// fact that getdents(2) returns the inode of each file in the directory being
|
||||
// walked, which, when walking two trees in parallel to generate a list of
|
||||
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
||||
// directly. Eliminating stat calls in this way can save up to seconds on large
|
||||
// images.
|
||||
type walker struct {
|
||||
dir1 string
|
||||
dir2 string
|
||||
root1 *FileInfo
|
||||
root2 *FileInfo
|
||||
}
|
||||
|
||||
// collectFileInfoForChanges returns a complete representation of the trees
|
||||
// rooted at dir1 and dir2, with one important exception: any subtree or
|
||||
// leaf where the inode and device numbers are an exact match between dir1
|
||||
// and dir2 will be pruned from the results. This method is *only* to be used
|
||||
// to generate a list of changes between the two directories, as it does not
|
||||
// reflect the full contents.
|
||||
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
||||
w := &walker{
|
||||
dir1: dir1,
|
||||
dir2: dir2,
|
||||
root1: newRootFileInfo(),
|
||||
root2: newRootFileInfo(),
|
||||
}
|
||||
|
||||
i1, err := os.Lstat(w.dir1)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
i2, err := os.Lstat(w.dir2)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := w.walk("/", i1, i2); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return w.root1, w.root2, nil
|
||||
}
|
||||
|
||||
// Given a FileInfo, its path info, and a reference to the root of the tree
|
||||
// being constructed, register this file with the tree.
|
||||
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||
if fi == nil {
|
||||
return nil
|
||||
}
|
||||
parent := root.LookUp(filepath.Dir(path))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
|
||||
}
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(path),
|
||||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
}
|
||||
cpath := filepath.Join(dir, path)
|
||||
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info.stat = stat
|
||||
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
||||
parent.children[info.name] = info
|
||||
return nil
|
||||
}
|
||||
|
||||
// Walk a subtree rooted at the same path in both trees being iterated. For
|
||||
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
|
||||
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
|
||||
// Register these nodes with the return trees, unless we're still at the
|
||||
// (already-created) roots:
|
||||
if path != "/" {
|
||||
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
is1Dir := i1 != nil && i1.IsDir()
|
||||
is2Dir := i2 != nil && i2.IsDir()
|
||||
|
||||
sameDevice := false
|
||||
if i1 != nil && i2 != nil {
|
||||
si1 := i1.Sys().(*syscall.Stat_t)
|
||||
si2 := i2.Sys().(*syscall.Stat_t)
|
||||
if si1.Dev == si2.Dev {
|
||||
sameDevice = true
|
||||
}
|
||||
}
|
||||
|
||||
// If these files are both non-existent, or leaves (non-dirs), we are done.
|
||||
if !is1Dir && !is2Dir {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch the names of all the files contained in both directories being walked:
|
||||
var names1, names2 []nameIno
|
||||
if is1Dir {
|
||||
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if is2Dir {
|
||||
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We have lists of the files contained in both parallel directories, sorted
|
||||
// in the same order. Walk them in parallel, generating a unique merged list
|
||||
// of all items present in either or both directories.
|
||||
var names []string
|
||||
ix1 := 0
|
||||
ix2 := 0
|
||||
|
||||
for {
|
||||
if ix1 >= len(names1) {
|
||||
break
|
||||
}
|
||||
if ix2 >= len(names2) {
|
||||
break
|
||||
}
|
||||
|
||||
ni1 := names1[ix1]
|
||||
ni2 := names2[ix2]
|
||||
|
||||
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
|
||||
case -1: // ni1 < ni2 -- advance ni1
|
||||
// we will not encounter ni1 in names2
|
||||
names = append(names, ni1.name)
|
||||
ix1++
|
||||
case 0: // ni1 == ni2
|
||||
if ni1.ino != ni2.ino || !sameDevice {
|
||||
names = append(names, ni1.name)
|
||||
}
|
||||
ix1++
|
||||
ix2++
|
||||
case 1: // ni1 > ni2 -- advance ni2
|
||||
// we will not encounter ni2 in names1
|
||||
names = append(names, ni2.name)
|
||||
ix2++
|
||||
}
|
||||
}
|
||||
for ix1 < len(names1) {
|
||||
names = append(names, names1[ix1].name)
|
||||
ix1++
|
||||
}
|
||||
for ix2 < len(names2) {
|
||||
names = append(names, names2[ix2].name)
|
||||
ix2++
|
||||
}
|
||||
|
||||
// For each of the names present in either or both of the directories being
|
||||
// iterated, stat the name under each root, and recurse the pair of them:
|
||||
for _, name := range names {
|
||||
fname := filepath.Join(path, name)
|
||||
var cInfo1, cInfo2 os.FileInfo
|
||||
if is1Dir {
|
||||
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if is2Dir {
|
||||
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
||||
type nameIno struct {
|
||||
name string
|
||||
ino uint64
|
||||
}
|
||||
|
||||
type nameInoSlice []nameIno
|
||||
|
||||
func (s nameInoSlice) Len() int { return len(s) }
|
||||
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
||||
|
||||
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
|
||||
// numbers further up the stack when reading directory contents. Unlike
|
||||
// os.Readdirnames, which returns a list of filenames, this function returns a
|
||||
// list of {filename,inode} pairs.
|
||||
func readdirnames(dirname string) (names []nameIno, err error) {
|
||||
var (
|
||||
size = 100
|
||||
buf = make([]byte, 4096)
|
||||
nbuf int
|
||||
bufp int
|
||||
nb int
|
||||
)
|
||||
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
names = make([]nameIno, 0, size) // Empty with room to grow.
|
||||
for {
|
||||
// Refill the buffer if necessary
|
||||
if bufp >= nbuf {
|
||||
bufp = 0
|
||||
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
||||
if nbuf < 0 {
|
||||
nbuf = 0
|
||||
}
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("readdirent", err)
|
||||
}
|
||||
if nbuf <= 0 {
|
||||
break // EOF
|
||||
}
|
||||
}
|
||||
|
||||
// Drain the buffer
|
||||
nb, names = parseDirent(buf[bufp:nbuf], names)
|
||||
bufp += nb
|
||||
}
|
||||
|
||||
sl := nameInoSlice(names)
|
||||
sort.Sort(sl)
|
||||
return sl, nil
|
||||
}
|
||||
|
||||
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
|
||||
// which returns {name,inode} pairs instead of just names.
|
||||
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
||||
origlen := len(buf)
|
||||
for len(buf) > 0 {
|
||||
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
||||
buf = buf[dirent.Reclen:]
|
||||
if dirent.Ino == 0 { // File absent in directory.
|
||||
continue
|
||||
}
|
||||
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||
var name = string(bytes[0:clen(bytes[:])])
|
||||
if name == "." || name == ".." { // Useless names
|
||||
continue
|
||||
}
|
||||
names = append(names, nameIno{name, dirent.Ino})
|
||||
}
|
||||
return origlen - len(buf), names
|
||||
}
|
||||
|
||||
func clen(n []byte) int {
|
||||
for i := 0; i < len(n); i++ {
|
||||
if n[i] == 0 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(n)
|
||||
}
|
97
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// +build !linux
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
|
||||
var (
|
||||
oldRoot, newRoot *FileInfo
|
||||
err1, err2 error
|
||||
errs = make(chan error, 2)
|
||||
)
|
||||
go func() {
|
||||
oldRoot, err1 = collectFileInfo(oldDir)
|
||||
errs <- err1
|
||||
}()
|
||||
go func() {
|
||||
newRoot, err2 = collectFileInfo(newDir)
|
||||
errs <- err2
|
||||
}()
|
||||
|
||||
// block until both routines have returned
|
||||
for i := 0; i < 2; i++ {
|
||||
if err := <-errs; err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return oldRoot, newRoot, nil
|
||||
}
|
||||
|
||||
func collectFileInfo(sourceDir string) (*FileInfo, error) {
|
||||
root := newRootFileInfo()
|
||||
|
||||
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
relPath, err := filepath.Rel(sourceDir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
relPath = filepath.Join(string(os.PathSeparator), relPath)
|
||||
|
||||
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
|
||||
// Temporary workaround. If the returned path starts with two backslashes,
|
||||
// trim it down to a single backslash. Only relevant on Windows.
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.HasPrefix(relPath, `\\`) {
|
||||
relPath = relPath[1:]
|
||||
}
|
||||
}
|
||||
|
||||
if relPath == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
parent := root.LookUp(filepath.Dir(relPath))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
|
||||
}
|
||||
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(relPath),
|
||||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
}
|
||||
|
||||
s, err := system.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info.stat = s
|
||||
|
||||
info.capability, _ = system.Lgetxattr(path, "security.capability")
|
||||
|
||||
parent.children[info.name] = info
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return root, nil
|
||||
}
|
@@ -6,7 +6,6 @@ import (
|
||||
"os/exec"
|
||||
"path"
|
||||
"sort"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -132,12 +131,23 @@ func TestChangesWithNoChanges(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChangesWithChanges(t *testing.T) {
|
||||
// Mock the readonly layer
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(layer)
|
||||
createSampleDir(t, layer)
|
||||
os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
|
||||
|
||||
// Mock the RW layer
|
||||
rwLayer, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rwLayer)
|
||||
// Create a folder
|
||||
|
||||
// Create a folder in RW layer
|
||||
dir1 := path.Join(rwLayer, "dir1")
|
||||
os.MkdirAll(dir1, 0740)
|
||||
deletedFile := path.Join(dir1, ".wh.file1-2")
|
||||
@@ -149,58 +159,76 @@ func TestChangesWithChanges(t *testing.T) {
|
||||
os.MkdirAll(subfolder, 0740)
|
||||
newFile := path.Join(subfolder, "newFile")
|
||||
ioutil.WriteFile(newFile, []byte{}, 0740)
|
||||
// Let's create folders that will have the role of layers with the same data
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(layer)
|
||||
createSampleDir(t, layer)
|
||||
os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
|
||||
|
||||
// Let's modify the modtime for dir1 to be sure it's the same for the two layers (to avoid a false positive)
|
||||
fi, err := os.Stat(dir1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
mtime := fi.ModTime()
|
||||
stat := fi.Sys().(*syscall.Stat_t)
|
||||
atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
|
||||
|
||||
layerDir1 := path.Join(layer, "dir1")
|
||||
os.Chtimes(layerDir1, atime, mtime)
|
||||
|
||||
changes, err := Changes([]string{layer}, rwLayer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Sort(changesByPath(changes))
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1", ChangeModify},
|
||||
{"/dir1/file1-1", ChangeModify},
|
||||
{"/dir1/file1-2", ChangeDelete},
|
||||
{"/dir1/subfolder", ChangeModify},
|
||||
{"/dir1/subfolder/newFile", ChangeAdd},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
}
|
||||
|
||||
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
|
||||
if i >= len(expectedChanges) {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
}
|
||||
if i >= len(changes) {
|
||||
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
|
||||
}
|
||||
if changes[i].Path == expectedChanges[i].Path {
|
||||
if changes[i] != expectedChanges[i] {
|
||||
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
|
||||
}
|
||||
} else if changes[i].Path < expectedChanges[i].Path {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
} else {
|
||||
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
|
||||
}
|
||||
// See https://github.com/docker/docker/pull/13590
|
||||
func TestChangesWithChangesGH13590(t *testing.T) {
|
||||
baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
|
||||
defer os.RemoveAll(baseLayer)
|
||||
|
||||
dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
|
||||
os.MkdirAll(dir3, 07400)
|
||||
|
||||
file := path.Join(dir3, "file.txt")
|
||||
ioutil.WriteFile(file, []byte("hello"), 0666)
|
||||
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test2.")
|
||||
defer os.RemoveAll(layer)
|
||||
|
||||
// Test creating a new file
|
||||
if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
|
||||
t.Fatalf("Cmd failed: %q", err)
|
||||
}
|
||||
|
||||
os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
|
||||
file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
|
||||
ioutil.WriteFile(file, []byte("bye"), 0666)
|
||||
|
||||
changes, err := Changes([]string{baseLayer}, layer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1/dir2/dir3", ChangeModify},
|
||||
{"/dir1/dir2/dir3/file1.txt", ChangeAdd},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
|
||||
// Now test changing a file
|
||||
layer, err = ioutil.TempDir("", "docker-changes-test3.")
|
||||
defer os.RemoveAll(layer)
|
||||
|
||||
if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
|
||||
t.Fatalf("Cmd failed: %q", err)
|
||||
}
|
||||
|
||||
file = path.Join(layer, "dir1/dir2/dir3/file.txt")
|
||||
ioutil.WriteFile(file, []byte("bye"), 0666)
|
||||
|
||||
changes, err = Changes([]string{baseLayer}, layer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges = []Change{
|
||||
{"/dir1/dir2/dir3/file.txt", ChangeModify},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
}
|
||||
|
||||
// Create a directory, copy it, make sure we report no changes between the two
|
||||
@@ -443,3 +471,25 @@ func TestChangesSize(t *testing.T) {
|
||||
t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
|
||||
}
|
||||
}
|
||||
|
||||
func checkChanges(expectedChanges, changes []Change, t *testing.T) {
|
||||
sort.Sort(changesByPath(expectedChanges))
|
||||
sort.Sort(changesByPath(changes))
|
||||
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
|
||||
if i >= len(expectedChanges) {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
}
|
||||
if i >= len(changes) {
|
||||
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
|
||||
}
|
||||
if changes[i].Path == expectedChanges[i].Path {
|
||||
if changes[i] != expectedChanges[i] {
|
||||
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
|
||||
}
|
||||
} else if changes[i].Path < expectedChanges[i].Path {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
} else {
|
||||
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
|
||||
}
|
||||
}
|
||||
}
|
27
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// +build !windows

package archive

import (
	"syscall"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
	// Don't look at size for dirs, it's not a good measure of change
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.Uid() != newStat.Uid() ||
		oldStat.Gid() != newStat.Gid() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size for dirs, it's not a good measure of change
		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
|
20
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
package archive

import (
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {

	// Don't look at size for dirs, it's not a good measure of change
	if oldStat.ModTime() != newStat.ModTime() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.IsDir()
}
|
308
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
generated
vendored
Normal file
@@ -0,0 +1,308 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Errors used or returned by this file.
|
||||
var (
|
||||
ErrNotDirectory = errors.New("not a directory")
|
||||
ErrDirNotExists = errors.New("no such directory")
|
||||
ErrCannotCopyDir = errors.New("cannot copy directory")
|
||||
ErrInvalidCopySource = errors.New("invalid copy source content")
|
||||
)
|
||||
|
||||
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||
// processing using any utility functions from the path or filepath stdlib
|
||||
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||
// path (from before being processed by utility functions from the path or
|
||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in a path separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||
if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
|
||||
if !HasTrailingPathSeparator(cleanedPath) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(filepath.Separator)
|
||||
}
|
||||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
|
||||
cleanedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
}
|
||||
|
||||
// AssertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func AssertsDirectory(path string) bool {
|
||||
return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// HasTrailingPathSeparator returns whether the given
|
||||
// path ends with the system's path separator character.
|
||||
func HasTrailingPathSeparator(path string) bool {
|
||||
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
||||
}
|
||||
|
||||
// SpecifiesCurrentDir returns whether the given path specifies
|
||||
// a "current directory", i.e., the last path segment is `.`.
|
||||
func SpecifiesCurrentDir(path string) bool {
|
||||
return filepath.Base(path) == "."
|
||||
}
|
||||
|
||||
// SplitPathDirEntry splits the given path between its
|
||||
// parent directory and its basename in that directory.
|
||||
func SplitPathDirEntry(localizedPath string) (dir, base string) {
|
||||
normalizedPath := filepath.ToSlash(localizedPath)
|
||||
vol := filepath.VolumeName(normalizedPath)
|
||||
normalizedPath = normalizedPath[len(vol):]
|
||||
|
||||
if normalizedPath == "/" {
|
||||
// Specifies the root path.
|
||||
return filepath.FromSlash(vol + normalizedPath), "."
|
||||
}
|
||||
|
||||
trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
|
||||
|
||||
dir = filepath.FromSlash(path.Dir(trimmedPath))
|
||||
base = filepath.FromSlash(path.Base(trimmedPath))
|
||||
|
||||
return dir, base
|
||||
}
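Expected behavior of SplitPathDirEntry, derived from the implementation above; shown as a standalone sketch with Linux-style example paths rather than as part of the file:

// Sketch only.
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	dir, base := archive.SplitPathDirEntry("/var/run/")
	fmt.Println(dir, base) // "/var run" -- the trailing separator is trimmed first

	dir, base = archive.SplitPathDirEntry("/")
	fmt.Println(dir, base) // "/ ." -- the root maps to dir "/" and base "."
}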
|
||||
|
||||
// TarResource archives the resource at the given sourcePath into a Tar
|
||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||
// asserted to be a directory but exists as another type of file.
|
||||
//
|
||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||
// requires a directory as the source path. TarResource accepts either a
|
||||
// directory or a file path and correctly sets the Tar options.
|
||||
func TarResource(sourcePath string) (content Archive, err error) {
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
// error.
|
||||
return
|
||||
}
|
||||
|
||||
if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
|
||||
// In the case where the source path is a symbolic link AND it ends
|
||||
// with a path separator, we will want to evaluate the symbolic link.
|
||||
trimmedPath := sourcePath[:len(sourcePath)-1]
|
||||
stat, err := os.Lstat(trimmedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if stat.Mode()&os.ModeSymlink != 0 {
|
||||
if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Separate the source path between its directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
|
||||
filter := []string{sourceBase}
|
||||
|
||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||
|
||||
return TarWithOptions(sourceDir, &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
})
|
||||
}
|
||||
|
||||
// CopyInfo holds basic info about the source
|
||||
// or destination path of a copy operation.
|
||||
type CopyInfo struct {
|
||||
Path string
|
||||
Exists bool
|
||||
IsDir bool
|
||||
}
|
||||
|
||||
// CopyInfoStatPath stats the given path to create a CopyInfo
|
||||
// struct representing that resource. If mustExist is true, then
|
||||
// it is an error if there is no file or directory at the given path.
|
||||
func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
|
||||
pathInfo := CopyInfo{Path: path}
|
||||
|
||||
fileInfo, err := os.Lstat(path)
|
||||
|
||||
if err == nil {
|
||||
pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
|
||||
} else if os.IsNotExist(err) && !mustExist {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return pathInfo, err
|
||||
}
|
||||
|
||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
||||
// contain the archived resource described by srcInfo, to the destination
|
||||
// described by dstInfo. Returns the possibly modified content archive along
|
||||
// with the path to the destination directory which it should be extracted to.
|
||||
func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
||||
// Separate the destination path between its directory and base
|
||||
// components in case the source archive contents need to be rebased.
|
||||
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
||||
_, srcBase := SplitPathDirEntry(srcInfo.Path)
|
||||
|
||||
switch {
|
||||
case dstInfo.Exists && dstInfo.IsDir:
|
||||
// The destination exists as a directory. No alteration
|
||||
// to srcContent is needed as its contents can be
|
||||
// simply extracted to the destination directory.
|
||||
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
|
||||
case dstInfo.Exists && srcInfo.IsDir:
|
||||
// The destination exists as some type of file and the source
|
||||
// content is a directory. This is an error condition since
|
||||
// you cannot copy a directory to an existing file location.
|
||||
return "", nil, ErrCannotCopyDir
|
||||
case dstInfo.Exists:
|
||||
// The destination exists as some type of file and the source content
|
||||
// is also a file. The source content entry will have to be renamed to
|
||||
// have a basename which matches the destination path's basename.
|
||||
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case srcInfo.IsDir:
|
||||
// The destination does not exist and the source content is an archive
|
||||
// of a directory. The archive should be extracted to the parent of
|
||||
// the destination path instead, and when it is, the directory that is
|
||||
// created as a result should take the name of the destination path.
|
||||
// The source content entries will have to be renamed to have a
|
||||
// basename which matches the destination path's basename.
|
||||
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case AssertsDirectory(dstInfo.Path):
|
||||
// The destination does not exist and is asserted to be created as a
|
||||
// directory, but the source content is not a directory. This is an
|
||||
// error condition since you cannot create a directory from a file
|
||||
// source.
|
||||
return "", nil, ErrDirNotExists
|
||||
default:
|
||||
// The last remaining case is when the destination does not exist, is
|
||||
// not asserted to be a directory, and the source content is not an
|
||||
// archive of a directory. In this case, the destination file will need
|
||||
// to be created when the archive is extracted and the source content
|
||||
// entry will have to be renamed to have a basename which matches the
|
||||
// destination path's basename.
|
||||
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// rebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||
// an occurrence of oldBase with newBase at the beginning of entry names.
|
||||
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
|
||||
rebased, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
srcTar := tar.NewReader(srcContent)
|
||||
rebasedTar := tar.NewWriter(w)
|
||||
|
||||
for {
|
||||
hdr, err := srcTar.Next()
|
||||
if err == io.EOF {
|
||||
// Signals end of archive.
|
||||
rebasedTar.Close()
|
||||
w.Close()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
||||
|
||||
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return rebased
|
||||
}
|
||||
|
||||
// CopyResource performs an archive copy from the given source path to the
|
||||
// given destination path. The source path MUST exist and the destination
|
||||
// path's parent directory must exist.
|
||||
func CopyResource(srcPath, dstPath string) error {
|
||||
var (
|
||||
srcInfo CopyInfo
|
||||
err error
|
||||
)
|
||||
|
||||
// Clean the source and destination paths.
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
||||
|
||||
if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := TarResource(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
return CopyTo(content, srcInfo, dstPath)
|
||||
}
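A usage sketch of CopyResource, which strings TarResource, PrepareArchiveCopy and Untar together; both paths are placeholders:

// Sketch only: copy a directory tree; a trailing "/" or "/." on either path is
// preserved by PreserveTrailingDotOrSeparator and affects how entries are rebased.
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	if err := archive.CopyResource("/tmp/src/dir1", "/tmp/dst/"); err != nil {
		log.Fatal(err)
	}
}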
|
||||
|
||||
// CopyTo handles extracting the given content whose
|
||||
// entries should be sourced from srcInfo to dstPath.
|
||||
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
|
||||
dstInfo, err := CopyInfoStatPath(dstPath, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !dstInfo.Exists {
|
||||
// Ensure destination parent dir exists.
|
||||
dstParent, _ := SplitPathDirEntry(dstPath)
|
||||
|
||||
dstStat, err := os.Lstat(dstParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !dstStat.IsDir() {
|
||||
return ErrNotDirectory
|
||||
}
|
||||
}
|
||||
|
||||
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer copyArchive.Close()
|
||||
|
||||
options := &TarOptions{
|
||||
NoLchown: true,
|
||||
NoOverwriteDirNonDir: true,
|
||||
}
|
||||
|
||||
return Untar(copyArchive, dstDir, options)
|
||||
}
|
637
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go
generated
vendored
Normal file
@@ -0,0 +1,637 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func removeAllPaths(paths ...string) {
|
||||
for _, path := range paths {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
}
|
||||
|
||||
func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
|
||||
var err error
|
||||
|
||||
if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func isNotDir(err error) bool {
|
||||
return strings.Contains(err.Error(), "not a directory")
|
||||
}
|
||||
|
||||
func joinTrailingSep(pathElements ...string) string {
|
||||
joined := filepath.Join(pathElements...)
|
||||
|
||||
return fmt.Sprintf("%s%c", joined, filepath.Separator)
|
||||
}
|
||||
|
||||
func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
|
||||
t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
|
||||
|
||||
fileA, err := os.Open(filenameA)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fileA.Close()
|
||||
|
||||
fileB, err := os.Open(filenameB)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fileB.Close()
|
||||
|
||||
hasher := sha256.New()
|
||||
|
||||
if _, err = io.Copy(hasher, fileA); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hashA := hasher.Sum(nil)
|
||||
hasher.Reset()
|
||||
|
||||
if _, err = io.Copy(hasher, fileB); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hashB := hasher.Sum(nil)
|
||||
|
||||
if !bytes.Equal(hashA, hashB) {
|
||||
err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
|
||||
t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
|
||||
|
||||
var changes []Change
|
||||
|
||||
if changes, err = ChangesDirs(newDir, oldDir); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func logDirContents(t *testing.T, dirPath string) {
|
||||
logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
t.Errorf("stat error for path %q: %s", path, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
path = joinTrailingSep(path)
|
||||
}
|
||||
|
||||
t.Logf("\t%s", path)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
t.Logf("logging directory contents: %q", dirPath)
|
||||
|
||||
if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
|
||||
t.Logf("copying from %q to %q", srcPath, dstPath)
|
||||
|
||||
return CopyResource(srcPath, dstPath)
|
||||
}
|
||||
|
||||
// Basic assumptions about SRC and DST:
|
||||
// 1. SRC must exist.
|
||||
// 2. If SRC ends with a trailing separator, it must be a directory.
|
||||
// 3. DST parent directory must exist.
|
||||
// 4. If DST exists as a file, it must not end with a trailing separator.
|
||||
|
||||
// First get these easy error cases out of the way.
|
||||
|
||||
// Test for error when SRC does not exist.
|
||||
func TestCopyErrSrcNotExists(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
content, err := TarResource(filepath.Join(tmpDirA, "file1"))
|
||||
if err == nil {
|
||||
content.Close()
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when SRC ends in a trailing
|
||||
// path separator but it exists as a file.
|
||||
func TestCopyErrSrcNotDir(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
content, err := TarResource(joinTrailingSep(tmpDirA, "file1"))
|
||||
if err == nil {
|
||||
content.Close()
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when SRC is a valid file or directory,
|
||||
// but the DST parent directory does not exist.
|
||||
func TestCopyErrDstParentNotExists(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
|
||||
|
||||
// Try with a file source.
|
||||
content, err := TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
// Copy to a file whose parent does not exist.
|
||||
if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
|
||||
// Try with a directory source.
|
||||
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
|
||||
|
||||
content, err = TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
// Copy to a directory whose parent does not exist.
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when DST ends in a trailing
|
||||
// path separator but exists as a file.
|
||||
func TestCopyErrDstNotDir(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
// Try with a file source.
|
||||
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
|
||||
|
||||
content, err := TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
|
||||
// Try with a directory source.
|
||||
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
|
||||
|
||||
content, err = TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Possibilities are reduced to the remaining 10 cases:
|
||||
//
|
||||
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
|
||||
// ===================================================================================================
|
||||
// A | no | - | no | - | no | create file
|
||||
// B | no | - | no | - | yes | error
|
||||
// C | no | - | yes | no | - | overwrite file
|
||||
// D | no | - | yes | yes | - | create file in dst dir
|
||||
// E | yes | no | no | - | - | create dir, copy contents
|
||||
// F | yes | no | yes | no | - | error
|
||||
// G | yes | no | yes | yes | - | copy dir and contents
|
||||
// H | yes | yes | no | - | - | create dir, copy contents
|
||||
// I | yes | yes | yes | no | - | error
|
||||
// J | yes | yes | yes | yes | - | copy dir contents
|
||||
//
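// The sketch below is illustrative only and is not part of the vendored
// package: it restates the table above as a small Go helper so the intent of
// cases A-J is easier to follow. The copyAction type and resolveCopyAction
// name are hypothetical; the real behaviour lives in CopyResource/CopyTo.
type copyAction int

const (
	actionError copyAction = iota
	actionCreateFile
	actionOverwriteFile
	actionCreateFileInDstDir
	actionCreateDirCopyContents
	actionCopyDirAndContents
	actionCopyDirContents
)

func resolveCopyAction(srcIsDir, onlyDirContents, dstExists, dstIsDir, dstTrSep bool) copyAction {
	switch {
	case !srcIsDir && !dstExists && !dstTrSep: // case A: create file at DST
		return actionCreateFile
	case !srcIsDir && !dstExists && dstTrSep: // case B: a lone file cannot become a directory
		return actionError
	case !srcIsDir && dstExists && !dstIsDir: // case C: overwrite existing file
		return actionOverwriteFile
	case !srcIsDir && dstExists && dstIsDir: // case D: create file inside DST dir
		return actionCreateFileInDstDir
	case srcIsDir && !onlyDirContents && !dstExists: // case E
		return actionCreateDirCopyContents
	case srcIsDir && !onlyDirContents && dstExists && !dstIsDir: // case F
		return actionError
	case srcIsDir && !onlyDirContents && dstExists && dstIsDir: // case G
		return actionCopyDirAndContents
	case srcIsDir && onlyDirContents && !dstExists: // case H
		return actionCreateDirCopyContents
	case srcIsDir && onlyDirContents && dstExists && !dstIsDir: // case I
		return actionError
	default: // case J: copy directory contents into existing DST dir
		return actionCopyDirContents
	}
}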
|
||||
|
||||
// A. SRC specifies a file and DST (no trailing path separator) doesn't
|
||||
// exist. This should create a file with the name DST and copy the
|
||||
// contents of the source file into it.
|
||||
func TestCopyCaseA(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstPath := filepath.Join(tmpDirB, "itWorks.txt")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// B. SRC specifies a file and DST (with trailing path separator) doesn't
|
||||
// exist. This should cause an error because the copy operation cannot
|
||||
// create a directory when copying a single file.
|
||||
func TestCopyCaseB(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstDir := joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err == nil {
|
||||
t.Fatal("expected ErrDirNotExists error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrDirNotExists {
|
||||
t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// C. SRC specifies a file and DST exists as a file. This should overwrite
|
||||
// the file at DST with the contents of the source file.
|
||||
func TestCopyCaseC(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstPath := filepath.Join(tmpDirB, "file2")
|
||||
|
||||
var err error
|
||||
|
||||
// Ensure they start out different.
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
|
||||
t.Fatal("expected different file contents")
|
||||
}
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// D. SRC specifies a file and DST exists as a directory. This should place
|
||||
// a copy of the source file inside it using the basename from SRC. Ensure
|
||||
// this works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseD(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstDir := filepath.Join(tmpDirB, "dir1")
|
||||
dstPath := filepath.Join(dstDir, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
// Ensure that dstPath doesn't exist.
|
||||
if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("did not expect dstPath %q to exist", dstPath)
|
||||
}
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir1")
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// E. SRC specifies a directory and DST does not exist. This should create a
|
||||
// directory at DST and copy the contents of the SRC directory into the DST
|
||||
// directory. Ensure this works whether DST has a trailing path separator or
|
||||
// not.
|
||||
func TestCopyCaseE(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstDir := filepath.Join(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// F. SRC specifies a directory and DST exists as a file. This should cause an
|
||||
// error as it is not possible to overwrite a file with a directory.
|
||||
func TestCopyCaseF(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstFile := filepath.Join(tmpDirB, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
|
||||
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrCannotCopyDir {
|
||||
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// G. SRC specifies a directory and DST exists as a directory. This should copy
|
||||
// the SRC directory and all its contents to the DST directory. Ensure this
|
||||
// works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseG(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstDir := filepath.Join(tmpDirB, "dir2")
|
||||
resultDir := filepath.Join(dstDir, "dir1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir2")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// H. SRC specifies a directory's contents only and DST does not exist. This
|
||||
// should create a directory at DST and copy the contents of the SRC
|
||||
// directory (but not the directory itself) into the DST directory. Ensure
|
||||
// this works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseH(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstDir := filepath.Join(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// I. SRC specifies a directory's contents only and DST exists as a file. This
|
||||
// should cause an error as it is not possible to overwrite a file with a
|
||||
// directory.
|
||||
func TestCopyCaseI(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstFile := filepath.Join(tmpDirB, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
|
||||
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrCannotCopyDir {
|
||||
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// J. SRC specifies a directory's contents only and DST exists as a directory.
|
||||
// This should copy the contents of the SRC directory (but not the directory
|
||||
// itself) into the DST directory. Ensure this works whether DST has a
|
||||
// trailing path separator or not.
|
||||
func TestCopyCaseJ(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstDir := filepath.Join(tmpDirB, "dir5")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir5")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
@@ -7,11 +7,13 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
@@ -40,14 +42,37 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
if !strings.HasSuffix(hdr.Name, "/") {
|
||||
// Windows does not support filenames with colons in them. Ignore
|
||||
// these files. This is not a problem though (although it might
|
||||
// appear that it is). Let's suppose a client is running docker pull.
|
||||
// The daemon it points to is Windows. Would it make sense for the
|
||||
// client to be doing a docker pull Ubuntu for example (which has files
|
||||
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
||||
// not as it would really only make sense that they were pulling a
|
||||
// Windows image. However, for development, it is necessary to be able
|
||||
// to pull Linux images which are in the repository.
|
||||
//
|
||||
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||
// specific or Linux-specific, this warning should be changed to an error
|
||||
// to cater for the situation where someone does manage to upload a Linux
|
||||
// image but has it tagged as Windows inadvertently.
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.Contains(hdr.Name, ":") {
|
||||
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Note as these operations are platform specific, so must the slash be.
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
// Not the root directory, ensure that the parent directory exists.
|
||||
// This happened in some tests where an image had a tarfile without any
|
||||
// parent directories.
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = os.MkdirAll(parentPath, 0600)
|
||||
err = system.MkdirAll(parentPath, 0600)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -68,19 +93,20 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
}
|
||||
defer os.RemoveAll(aufsTempdir)
|
||||
}
|
||||
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
|
||||
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
rel, err := filepath.Rel(dest, path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if strings.HasPrefix(rel, "../") {
|
||||
|
||||
// Note as these operations are platform specific, so must the slash be.
|
||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
|
||||
}
|
||||
base := filepath.Base(path)
|
||||
@@ -124,7 +150,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
srcData = tmpFile
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
|
||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -147,10 +173,24 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// ApplyLayer parses a diff in the standard layer format from `layer`, and
|
||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||
// contents of the layer.
|
||||
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||
// and applies it to the directory `dest`. The stream `layer` can be
|
||||
// compressed or uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, true)
|
||||
}
|
||||
|
||||
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||
// can only be uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, false)
|
||||
}
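// Illustrative usage only, not part of the vendored file: ApplyLayer accepts
// any io.Reader as its ArchiveReader (the stream may be compressed), while
// ApplyUncompressedLayer skips DecompressStream. The paths below are
// hypothetical.
func exampleApplyLayer() error {
	layer, err := os.Open("/tmp/layer.tar.gz") // a compressed stream is fine for ApplyLayer
	if err != nil {
		return err
	}
	defer layer.Close()

	size, err := ApplyLayer("/var/lib/example-rootfs", layer)
	if err != nil {
		return err
	}
	fmt.Printf("applied layer: %d bytes of content\n", size)
	return nil
}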
|
||||
|
||||
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||
func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
|
||||
dest = filepath.Clean(dest)
|
||||
|
||||
// We need to be able to set any perms
|
||||
@@ -160,9 +200,11 @@ func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
|
||||
}
|
||||
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
||||
|
||||
layer, err = DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
if decompress {
|
||||
layer, err = DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return UnpackLayer(dest, layer)
|
||||
}
|
@@ -13,8 +13,8 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
var (
|
@@ -133,7 +133,7 @@ func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
|
||||
helloStat.Size() != fi.Size() ||
|
||||
!bytes.Equal(helloData, b) {
|
||||
// codepath taken if hello has been modified
|
||||
return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
|
||||
return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
|
||||
}
|
||||
|
||||
// Check that nothing in dest/ has the same content as victim/hello.
|
@@ -9,18 +9,20 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func Exclusion(pattern string) bool {
|
||||
// exclusion returns true if the specified pattern is an exclusion
|
||||
func exclusion(pattern string) bool {
|
||||
return pattern[0] == '!'
|
||||
}
|
||||
|
||||
func Empty(pattern string) bool {
|
||||
// empty returns true if the specified pattern is empty
|
||||
func empty(pattern string) bool {
|
||||
return pattern == ""
|
||||
}
|
||||
|
||||
// Cleanpatterns takes a slice of patterns returns a new
|
||||
// CleanPatterns takes a slice of patterns and returns a new
|
||||
// slice of patterns cleaned with filepath.Clean, stripped
|
||||
// of any empty patterns and lets the caller know whether the
|
||||
// slice contains any exception patterns (prefixed with !).
|
||||
@@ -35,19 +37,18 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
|
||||
for _, pattern := range patterns {
|
||||
// Eliminate leading and trailing whitespace.
|
||||
pattern = strings.TrimSpace(pattern)
|
||||
if Empty(pattern) {
|
||||
if empty(pattern) {
|
||||
continue
|
||||
}
|
||||
if Exclusion(pattern) {
|
||||
if exclusion(pattern) {
|
||||
if len(pattern) == 1 {
|
||||
logrus.Errorf("Illegal exclusion pattern: %s", pattern)
|
||||
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
|
||||
}
|
||||
exceptions = true
|
||||
}
|
||||
pattern = filepath.Clean(pattern)
|
||||
cleanedPatterns = append(cleanedPatterns, pattern)
|
||||
if Exclusion(pattern) {
|
||||
if exclusion(pattern) {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
|
||||
@@ -74,7 +75,7 @@ func Matches(file string, patterns []string) (bool, error) {
|
||||
return OptimizedMatches(file, patterns, patDirs)
|
||||
}
|
||||
|
||||
// Matches is basically the same as fileutils.Matches() but optimized for archive.go.
|
||||
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
|
||||
// It will assume that the inputs have been preprocessed and therefore the function
|
||||
// doesn't need to do as much error checking and clean-up. This was done to avoid
|
||||
// repeating these steps on each file being checked during the archive process.
|
||||
@@ -87,14 +88,13 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
|
||||
for i, pattern := range patterns {
|
||||
negative := false
|
||||
|
||||
if Exclusion(pattern) {
|
||||
if exclusion(pattern) {
|
||||
negative = true
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
|
||||
match, err := filepath.Match(pattern, file)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error matching: %s (pattern: %s)", file, pattern)
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -114,9 +114,13 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
|
||||
if matched {
|
||||
logrus.Debugf("Skipping excluded path: %s", file)
|
||||
}
|
||||
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
// CopyFile copies from src to dst until either EOF is reached
|
||||
// on src or an error occurs. It verifies src exists and removes
|
||||
// the dst if it exists.
|
||||
func CopyFile(src, dst string) (int64, error) {
|
||||
cleanSrc := filepath.Clean(src)
|
||||
cleanDst := filepath.Clean(dst)
|
||||
@@ -139,6 +143,8 @@ func CopyFile(src, dst string) (int64, error) {
|
||||
return io.Copy(df, sf)
|
||||
}
|
||||
|
||||
// GetTotalUsedFds returns the number of used File Descriptors by
|
||||
// reading it via /proc filesystem.
|
||||
func GetTotalUsedFds() int {
|
||||
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
||||
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||
@@ -168,3 +174,23 @@ func ReadSymlinkedDirectory(path string) (string, error) {
|
||||
}
|
||||
return realPath, nil
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates a file or a directory only if it does not already exist.
|
||||
func CreateIfNotExists(path string, isDir bool) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if isDir {
|
||||
return os.MkdirAll(path, 0755)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(path, os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
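// Illustrative usage only, not part of the vendored file: Matches applies
// .dockerignore-style patterns (including "!" exceptions) to a path, and
// CreateIfNotExists backfills a missing file or directory. The paths and
// patterns are hypothetical.
func exampleFileutils() error {
	excluded, err := Matches("logs/app.log", []string{"logs/*", "!logs/keep.log"})
	if err != nil {
		return err
	}
	if excluded {
		return nil // skipped by the patterns, nothing to create
	}
	return CreateIfNotExists("/tmp/build/logs/app.log", false)
}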
|
@@ -4,6 +4,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -268,7 +269,7 @@ func TestSingleExclamationError(t *testing.T) {
|
||||
|
||||
// A string preceded with a ! should return true from Exclusion.
|
||||
func TestExclusion(t *testing.T) {
|
||||
exclusion := Exclusion("!")
|
||||
exclusion := exclusion("!")
|
||||
if !exclusion {
|
||||
t.Errorf("failed to get true for a single !, got %v", exclusion)
|
||||
}
|
||||
@@ -298,7 +299,7 @@ func TestMatchesWithMalformedPatterns(t *testing.T) {
|
||||
|
||||
// An empty string should return true from Empty.
|
||||
func TestEmpty(t *testing.T) {
|
||||
empty := Empty("")
|
||||
empty := empty("")
|
||||
if !empty {
|
||||
t.Errorf("failed to get true for an empty string, got %v", empty)
|
||||
}
|
||||
@@ -355,3 +356,47 @@ func TestCleanPatternsFolderSplit(t *testing.T) {
|
||||
t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIfNotExistsDir(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempFolder)
|
||||
|
||||
folderToCreate := filepath.Join(tempFolder, "tocreate")
|
||||
|
||||
if err := CreateIfNotExists(folderToCreate, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fileinfo, err := os.Stat(folderToCreate)
|
||||
if err != nil {
|
||||
t.Fatalf("Should have create a folder, got %v", err)
|
||||
}
|
||||
|
||||
if !fileinfo.IsDir() {
|
||||
t.Fatalf("Should have been a dir, seems it's not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIfNotExistsFile(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempFolder)
|
||||
|
||||
fileToCreate := filepath.Join(tempFolder, "file/to/create")
|
||||
|
||||
if err := CreateIfNotExists(fileToCreate, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fileinfo, err := os.Stat(fileToCreate)
|
||||
if err != nil {
|
||||
t.Fatalf("Should have create a file, got %v", err)
|
||||
}
|
||||
|
||||
if fileinfo.IsDir() {
|
||||
t.Fatalf("Should have been a file, seems it's not")
|
||||
}
|
||||
}
|
39
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package homedir
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
|
||||
)
|
||||
|
||||
// Key returns the env var name for the user's home dir based on
|
||||
// the platform being run on
|
||||
func Key() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return "USERPROFILE"
|
||||
}
|
||||
return "HOME"
|
||||
}
|
||||
|
||||
// Get returns the home directory of the current user with the help of
|
||||
// environment variables depending on the target operating system.
|
||||
// Returned path should be used with "path/filepath" to form new paths.
|
||||
func Get() string {
|
||||
home := os.Getenv(Key())
|
||||
if home == "" && runtime.GOOS != "windows" {
|
||||
if u, err := user.CurrentUser(); err == nil {
|
||||
return u.Home
|
||||
}
|
||||
}
|
||||
return home
|
||||
}
|
||||
|
||||
// GetShortcutString returns the string that is shortcut to user's home directory
|
||||
// in the native shell of the platform running on.
|
||||
func GetShortcutString() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return "%USERPROFILE%" // be careful while using in format functions
|
||||
}
|
||||
return "~"
|
||||
}
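// Illustrative usage only, not part of the vendored file. As the doc comment
// on Get suggests, join the returned directory with "path/filepath" instead of
// concatenating strings; the ".dockercfg" name is just an example:
//
//	cfgPath := filepath.Join(homedir.Get(), ".dockercfg")
//	fmt.Println("config:", cfgPath)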
|
24
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
package homedir
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
home := Get()
|
||||
if home == "" {
|
||||
t.Fatal("returned home directory is empty")
|
||||
}
|
||||
|
||||
if !filepath.IsAbs(home) {
|
||||
t.Fatalf("returned path is not absolute: %s", home)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetShortcutString(t *testing.T) {
|
||||
shortcut := GetShortcutString()
|
||||
if shortcut == "" {
|
||||
t.Fatal("returned shortcut string is empty")
|
||||
}
|
||||
}
|
14
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
package ioutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// FprintfIfNotEmpty prints the string value if it's not empty
|
||||
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
|
||||
if value != "" {
|
||||
return fmt.Fprintf(w, format, value)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
17
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
package ioutils
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestFprintfIfNotEmpty(t *testing.T) {
|
||||
wc := NewWriteCounter(&NopWriter{})
|
||||
n, _ := FprintfIfNotEmpty(wc, "foo%s", "")
|
||||
|
||||
if wc.Count != 0 || n != 0 {
|
||||
t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n)
|
||||
}
|
||||
|
||||
n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar")
|
||||
if wc.Count != 6 || n != 6 {
|
||||
t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n)
|
||||
}
|
||||
}
|
226
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
package ioutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type pos struct {
|
||||
idx int
|
||||
offset int64
|
||||
}
|
||||
|
||||
type multiReadSeeker struct {
|
||||
readers []io.ReadSeeker
|
||||
pos *pos
|
||||
posIdx map[io.ReadSeeker]int
|
||||
}
|
||||
|
||||
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
var tmpOffset int64
|
||||
switch whence {
|
||||
case os.SEEK_SET:
|
||||
for i, rdr := range r.readers {
|
||||
// get size of the current reader
|
||||
s, err := rdr.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if offset > tmpOffset+s {
|
||||
if i == len(r.readers)-1 {
|
||||
rdrOffset := s + (offset - tmpOffset)
|
||||
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
r.pos = &pos{i, rdrOffset}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
tmpOffset += s
|
||||
continue
|
||||
}
|
||||
|
||||
rdrOffset := offset - tmpOffset
|
||||
idx := i
|
||||
|
||||
rdr.Seek(rdrOffset, os.SEEK_SET)
|
||||
// make sure all following readers are at 0
|
||||
for _, rdr := range r.readers[i+1:] {
|
||||
rdr.Seek(0, os.SEEK_SET)
|
||||
}
|
||||
|
||||
if rdrOffset == s && i != len(r.readers)-1 {
|
||||
idx += 1
|
||||
rdrOffset = 0
|
||||
}
|
||||
r.pos = &pos{idx, rdrOffset}
|
||||
return offset, nil
|
||||
}
|
||||
case os.SEEK_END:
|
||||
for _, rdr := range r.readers {
|
||||
s, err := rdr.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
tmpOffset += s
|
||||
}
|
||||
r.Seek(tmpOffset+offset, os.SEEK_SET)
|
||||
return tmpOffset + offset, nil
|
||||
case os.SEEK_CUR:
|
||||
if r.pos == nil {
|
||||
return r.Seek(offset, os.SEEK_SET)
|
||||
}
|
||||
// Just return the current offset
|
||||
if offset == 0 {
|
||||
return r.getCurOffset()
|
||||
}
|
||||
|
||||
curOffset, err := r.getCurOffset()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
r.pos = &pos{r.posIdx[rdr], rdrOffset}
|
||||
return curOffset + offset, nil
|
||||
default:
|
||||
return -1, fmt.Errorf("Invalid whence: %d", whence)
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
|
||||
}
|
||||
|
||||
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
|
||||
var rdr io.ReadSeeker
|
||||
var rdrOffset int64
|
||||
|
||||
for i, rdr := range r.readers {
|
||||
offsetTo, err := r.getOffsetToReader(rdr)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
if offsetTo > offset {
|
||||
rdr = r.readers[i-1]
|
||||
rdrOffset = offsetTo - offset
|
||||
break
|
||||
}
|
||||
|
||||
if rdr == r.readers[len(r.readers)-1] {
|
||||
rdrOffset = offsetTo + offset
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return rdr, rdrOffset, nil
|
||||
}
|
||||
|
||||
func (r *multiReadSeeker) getCurOffset() (int64, error) {
|
||||
var totalSize int64
|
||||
for _, rdr := range r.readers[:r.pos.idx+1] {
|
||||
if r.posIdx[rdr] == r.pos.idx {
|
||||
totalSize += r.pos.offset
|
||||
break
|
||||
}
|
||||
|
||||
size, err := getReadSeekerSize(rdr)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error getting seeker size: %v", err)
|
||||
}
|
||||
totalSize += size
|
||||
}
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
|
||||
var offset int64
|
||||
for _, r := range r.readers {
|
||||
if r == rdr {
|
||||
break
|
||||
}
|
||||
|
||||
size, err := getReadSeekerSize(rdr)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
offset += size
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (r *multiReadSeeker) Read(b []byte) (int, error) {
|
||||
if r.pos == nil {
|
||||
r.pos = &pos{0, 0}
|
||||
}
|
||||
|
||||
bCap := int64(cap(b))
|
||||
buf := bytes.NewBuffer(nil)
|
||||
var rdr io.ReadSeeker
|
||||
|
||||
for _, rdr = range r.readers[r.pos.idx:] {
|
||||
readBytes, err := io.CopyN(buf, rdr, bCap)
|
||||
if err != nil && err != io.EOF {
|
||||
return -1, err
|
||||
}
|
||||
bCap -= readBytes
|
||||
|
||||
if bCap == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
r.pos = &pos{r.posIdx[rdr], rdrPos}
|
||||
return buf.Read(b)
|
||||
}
|
||||
|
||||
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
|
||||
// save the current position
|
||||
pos, err := rdr.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// get the size
|
||||
size, err := rdr.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// reset the position
|
||||
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
|
||||
// input readseekers. After calling this method the initial position is set to the
|
||||
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
|
||||
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
|
||||
// Seek can be used over the sum of lengths of all readseekers.
|
||||
//
|
||||
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
|
||||
// its ReadSeeker components. Also, users should make no assumption on the state
|
||||
// of individual readseekers while the MultiReadSeeker is used.
|
||||
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
|
||||
if len(readers) == 1 {
|
||||
return readers[0]
|
||||
}
|
||||
idx := make(map[io.ReadSeeker]int)
|
||||
for i, rdr := range readers {
|
||||
idx[rdr] = i
|
||||
}
|
||||
return &multiReadSeeker{
|
||||
readers: readers,
|
||||
posIdx: idx,
|
||||
}
|
||||
}
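// Illustrative usage only, not part of the vendored file: two in-memory
// readers behave as one seekable stream, mirroring what the tests below
// exercise with three readers.
//
//	a := strings.NewReader("hello ")
//	b := strings.NewReader("world")
//	rs := MultiReadSeeker(a, b)
//	rs.Seek(6, os.SEEK_SET)       // position at the start of "world"
//	rest, _ := ioutil.ReadAll(rs) // "world"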
|
149
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
package ioutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMultiReadSeekerReadAll(t *testing.T) {
|
||||
str := "hello world"
|
||||
s1 := strings.NewReader(str + " 1")
|
||||
s2 := strings.NewReader(str + " 2")
|
||||
s3 := strings.NewReader(str + " 3")
|
||||
mr := MultiReadSeeker(s1, s2, s3)
|
||||
|
||||
expectedSize := int64(s1.Len() + s2.Len() + s3.Len())
|
||||
|
||||
b, err := ioutil.ReadAll(mr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := "hello world 1hello world 2hello world 3"
|
||||
if string(b) != expected {
|
||||
t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
|
||||
}
|
||||
|
||||
size, err := mr.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if size != expectedSize {
|
||||
t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize)
|
||||
}
|
||||
|
||||
// Reset the position and read again
|
||||
pos, err := mr.Seek(0, os.SEEK_SET)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if pos != 0 {
|
||||
t.Fatalf("expected position to be set to 0, got %d", pos)
|
||||
}
|
||||
|
||||
b, err = ioutil.ReadAll(mr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(b) != expected {
|
||||
t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReadSeekerReadEach(t *testing.T) {
|
||||
str := "hello world"
|
||||
s1 := strings.NewReader(str + " 1")
|
||||
s2 := strings.NewReader(str + " 2")
|
||||
s3 := strings.NewReader(str + " 3")
|
||||
mr := MultiReadSeeker(s1, s2, s3)
|
||||
|
||||
var totalBytes int64
|
||||
for i, s := range []*strings.Reader{s1, s2, s3} {
|
||||
sLen := int64(s.Len())
|
||||
buf := make([]byte, s.Len())
|
||||
expected := []byte(fmt.Sprintf("%s %d", str, i+1))
|
||||
|
||||
if _, err := mr.Read(buf); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf, expected) {
|
||||
t.Fatalf("expected %q to be %q", string(buf), string(expected))
|
||||
}
|
||||
|
||||
pos, err := mr.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
t.Fatalf("iteration: %d, error: %v", i+1, err)
|
||||
}
|
||||
|
||||
// check that the total bytes read is the current position of the seeker
|
||||
totalBytes += sLen
|
||||
if pos != totalBytes {
|
||||
t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
|
||||
}
|
||||
|
||||
// This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well
|
||||
newPos, err := mr.Seek(pos, os.SEEK_SET)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if newPos != pos {
|
||||
t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
|
||||
str := "hello world"
|
||||
s1 := strings.NewReader(str + " 1")
|
||||
s2 := strings.NewReader(str + " 2")
|
||||
s3 := strings.NewReader(str + " 3")
|
||||
mr := MultiReadSeeker(s1, s2, s3)
|
||||
|
||||
buf := make([]byte, s1.Len()+3)
|
||||
_, err := mr.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string
|
||||
expected := "hello world 1hel"
|
||||
if string(buf) != expected {
|
||||
t.Fatalf("expected %s to be %s", string(buf), expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReadSeekerNegativeSeek(t *testing.T) {
|
||||
str := "hello world"
|
||||
s1 := strings.NewReader(str + " 1")
|
||||
s2 := strings.NewReader(str + " 2")
|
||||
s3 := strings.NewReader(str + " 3")
|
||||
mr := MultiReadSeeker(s1, s2, s3)
|
||||
|
||||
s1Len := s1.Len()
|
||||
s2Len := s2.Len()
|
||||
s3Len := s3.Len()
|
||||
|
||||
s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s != int64(s1Len+s2Len) {
|
||||
t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len())
|
||||
}
|
||||
|
||||
buf := make([]byte, s3Len)
|
||||
if _, err := mr.Read(buf); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := fmt.Sprintf("%s %d", str, 3)
|
||||
if string(buf) != fmt.Sprintf("%s %d", str, 3) {
|
||||
t.Fatalf("expected %q to be %q", string(buf), expected)
|
||||
}
|
||||
}
|
@@ -189,6 +189,7 @@ func (r *bufReader) drain() {
|
||||
reuseCount++
|
||||
r.wait.Signal()
|
||||
r.Unlock()
|
||||
callSchedulerIfNecessary()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
@@ -225,3 +226,29 @@ func HashData(src io.Reader) (string, error) {
|
||||
}
|
||||
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
type OnEOFReader struct {
|
||||
Rc io.ReadCloser
|
||||
Fn func()
|
||||
}
|
||||
|
||||
func (r *OnEOFReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.Rc.Read(p)
|
||||
if err == io.EOF {
|
||||
r.runFunc()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *OnEOFReader) Close() error {
|
||||
err := r.Rc.Close()
|
||||
r.runFunc()
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *OnEOFReader) runFunc() {
|
||||
if fn := r.Fn; fn != nil {
|
||||
fn()
|
||||
r.Fn = nil
|
||||
}
|
||||
}
|
@@ -43,10 +43,9 @@ func TestReaderErrWrapperReadOnError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReaderErrWrapperRead(t *testing.T) {
|
||||
called := false
|
||||
reader := strings.NewReader("a string reader.")
|
||||
wrapper := NewReaderErrWrapper(reader, func() {
|
||||
called = true // Should not be called
|
||||
t.Fatalf("readErrWrapper should not have called the anonymous function")
|
||||
})
|
||||
// Read 20 byte (should be ok with the string above)
|
||||
num, err := wrapper.Read(make([]byte, 20))
|
6
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
// +build !gccgo
|
||||
|
||||
package ioutils
|
||||
|
||||
func callSchedulerIfNecessary() {
|
||||
}
|
13
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build gccgo
|
||||
|
||||
package ioutils
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func callSchedulerIfNecessary() {
|
||||
// allow or force the Go scheduler to switch context; without explicitly
|
||||
// forcing this, it will hang when using the gccgo implementation
|
||||
runtime.Gosched()
|
||||
}
|
27
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
40
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
Package mflag (aka multiple-flag) implements command-line flag parsing.
|
||||
It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/).
|
||||
|
||||
It adds:
|
||||
|
||||
* both short and long flag version
|
||||
`./example -s red` `./example --string blue`
|
||||
|
||||
* multiple names for the same option
|
||||
```
|
||||
$>./example -h
|
||||
Usage of example:
|
||||
-s, --string="": a simple string
|
||||
```
|
||||
|
||||
___
|
||||
It is very flexible on purpose, so you can do things like:
|
||||
```
|
||||
$>./example -h
|
||||
Usage of example:
|
||||
-s, -string, --string="": a simple string
|
||||
```
|
||||
|
||||
Or:
|
||||
```
|
||||
$>./example -h
|
||||
Usage of example:
|
||||
-oldflag, --newflag="": a simple string
|
||||
```
|
||||
|
||||
You can also hide some flags from the usage, so if we want only `--newflag`:
|
||||
```
|
||||
$>./example -h
|
||||
Usage of example:
|
||||
--newflag="": a simple string
|
||||
$>./example -oldflag str
|
||||
str
|
||||
```
|
||||
|
||||
See [example.go](example/example.go) for more details.
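
For orientation, here is a minimal program using the multi-name API described above (the flag names and output are illustrative; the import path refers to this vendored copy):
```
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	// One flag reachable as -s or --string: a name that starts with "-"
	// gets the extra dash in the usage output.
	str := flag.String([]string{"s", "-string"}, "", "a simple string")
	flag.Parse()
	fmt.Println(*str)
}
```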
|
1201
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go
generated
vendored
Normal file
File diff suppressed because it is too large
516
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go
generated
vendored
Normal file
@@ -0,0 +1,516 @@
|
||||
// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mflag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ResetForTesting clears all flag state and sets the usage function as directed.
|
||||
// After calling ResetForTesting, parse errors in flag handling will not
|
||||
// exit the program.
|
||||
func ResetForTesting(usage func()) {
|
||||
CommandLine = NewFlagSet(os.Args[0], ContinueOnError)
|
||||
Usage = usage
|
||||
}
|
||||
func boolString(s string) string {
|
||||
if s == "0" {
|
||||
return "false"
|
||||
}
|
||||
return "true"
|
||||
}
|
||||
|
||||
func TestEverything(t *testing.T) {
|
||||
ResetForTesting(nil)
|
||||
Bool([]string{"test_bool"}, false, "bool value")
|
||||
Int([]string{"test_int"}, 0, "int value")
|
||||
Int64([]string{"test_int64"}, 0, "int64 value")
|
||||
Uint([]string{"test_uint"}, 0, "uint value")
|
||||
Uint64([]string{"test_uint64"}, 0, "uint64 value")
|
||||
String([]string{"test_string"}, "0", "string value")
|
||||
Float64([]string{"test_float64"}, 0, "float64 value")
|
||||
Duration([]string{"test_duration"}, 0, "time.Duration value")
|
||||
|
||||
m := make(map[string]*Flag)
|
||||
desired := "0"
|
||||
visitor := func(f *Flag) {
|
||||
for _, name := range f.Names {
|
||||
if len(name) > 5 && name[0:5] == "test_" {
|
||||
m[name] = f
|
||||
ok := false
|
||||
switch {
|
||||
case f.Value.String() == desired:
|
||||
ok = true
|
||||
case name == "test_bool" && f.Value.String() == boolString(desired):
|
||||
ok = true
|
||||
case name == "test_duration" && f.Value.String() == desired+"s":
|
||||
ok = true
|
||||
}
|
||||
if !ok {
|
||||
t.Error("Visit: bad value", f.Value.String(), "for", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
VisitAll(visitor)
|
||||
if len(m) != 8 {
|
||||
t.Error("VisitAll misses some flags")
|
||||
for k, v := range m {
|
||||
t.Log(k, *v)
|
||||
}
|
||||
}
|
||||
m = make(map[string]*Flag)
|
||||
Visit(visitor)
|
||||
if len(m) != 0 {
|
||||
t.Errorf("Visit sees unset flags")
|
||||
for k, v := range m {
|
||||
t.Log(k, *v)
|
||||
}
|
||||
}
|
||||
// Now set all flags
|
||||
Set("test_bool", "true")
|
||||
Set("test_int", "1")
|
||||
Set("test_int64", "1")
|
||||
Set("test_uint", "1")
|
||||
Set("test_uint64", "1")
|
||||
Set("test_string", "1")
|
||||
Set("test_float64", "1")
|
||||
Set("test_duration", "1s")
|
||||
desired = "1"
|
||||
Visit(visitor)
|
||||
if len(m) != 8 {
|
||||
t.Error("Visit fails after set")
|
||||
for k, v := range m {
|
||||
t.Log(k, *v)
|
||||
}
|
||||
}
|
||||
// Now test they're visited in sort order.
|
||||
var flagNames []string
|
||||
Visit(func(f *Flag) {
|
||||
for _, name := range f.Names {
|
||||
flagNames = append(flagNames, name)
|
||||
}
|
||||
})
|
||||
if !sort.StringsAreSorted(flagNames) {
|
||||
t.Errorf("flag names not sorted: %v", flagNames)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
ResetForTesting(nil)
|
||||
Bool([]string{"test_bool"}, true, "bool value")
|
||||
Int([]string{"test_int"}, 1, "int value")
|
||||
Int64([]string{"test_int64"}, 2, "int64 value")
|
||||
Uint([]string{"test_uint"}, 3, "uint value")
|
||||
Uint64([]string{"test_uint64"}, 4, "uint64 value")
|
||||
String([]string{"test_string"}, "5", "string value")
|
||||
Float64([]string{"test_float64"}, 6, "float64 value")
|
||||
Duration([]string{"test_duration"}, 7, "time.Duration value")
|
||||
|
||||
visitor := func(f *Flag) {
|
||||
for _, name := range f.Names {
|
||||
if len(name) > 5 && name[0:5] == "test_" {
|
||||
g, ok := f.Value.(Getter)
|
||||
if !ok {
|
||||
t.Errorf("Visit: value does not satisfy Getter: %T", f.Value)
|
||||
return
|
||||
}
|
||||
switch name {
|
||||
case "test_bool":
|
||||
ok = g.Get() == true
|
||||
case "test_int":
|
||||
ok = g.Get() == int(1)
|
||||
case "test_int64":
|
||||
ok = g.Get() == int64(2)
|
||||
case "test_uint":
|
||||
ok = g.Get() == uint(3)
|
||||
case "test_uint64":
|
||||
ok = g.Get() == uint64(4)
|
||||
case "test_string":
|
||||
ok = g.Get() == "5"
|
||||
case "test_float64":
|
||||
ok = g.Get() == float64(6)
|
||||
case "test_duration":
|
||||
ok = g.Get() == time.Duration(7)
|
||||
}
|
||||
if !ok {
|
||||
t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
VisitAll(visitor)
|
||||
}
|
||||
|
||||
func testParse(f *FlagSet, t *testing.T) {
|
||||
if f.Parsed() {
|
||||
t.Error("f.Parse() = true before Parse")
|
||||
}
|
||||
boolFlag := f.Bool([]string{"bool"}, false, "bool value")
|
||||
bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
|
||||
f.Bool([]string{"bool3"}, false, "bool3 value")
|
||||
bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
|
||||
intFlag := f.Int([]string{"-int"}, 0, "int value")
|
||||
int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
|
||||
uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
|
||||
uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
|
||||
stringFlag := f.String([]string{"string"}, "0", "string value")
|
||||
f.String([]string{"string2"}, "0", "string2 value")
|
||||
singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value")
|
||||
doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value")
|
||||
mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value")
|
||||
mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value")
|
||||
nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value")
|
||||
nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value")
|
||||
float64Flag := f.Float64([]string{"float64"}, 0, "float64 value")
|
||||
durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value")
|
||||
extra := "one-extra-argument"
|
||||
args := []string{
|
||||
"-bool",
|
||||
"-bool2=true",
|
||||
"-bool4=false",
|
||||
"--int", "22",
|
||||
"--int64", "0x23",
|
||||
"-uint", "24",
|
||||
"--uint64", "25",
|
||||
"-string", "hello",
|
||||
"-squote='single'",
|
||||
`-dquote="double"`,
|
||||
`-mquote='mixed"`,
|
||||
`-mquote2="mixed2'`,
|
||||
`-nquote="'single nested'"`,
|
||||
`-nquote2='"double nested"'`,
|
||||
"-float64", "2718e28",
|
||||
"-duration", "2m",
|
||||
extra,
|
||||
}
|
||||
if err := f.Parse(args); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !f.Parsed() {
|
||||
t.Error("f.Parse() = false after Parse")
|
||||
}
|
||||
if *boolFlag != true {
|
||||
t.Error("bool flag should be true, is ", *boolFlag)
|
||||
}
|
||||
if *bool2Flag != true {
|
||||
t.Error("bool2 flag should be true, is ", *bool2Flag)
|
||||
}
|
||||
if !f.IsSet("bool2") {
|
||||
t.Error("bool2 should be marked as set")
|
||||
}
|
||||
if f.IsSet("bool3") {
|
||||
t.Error("bool3 should not be marked as set")
|
||||
}
|
||||
if !f.IsSet("bool4") {
|
||||
t.Error("bool4 should be marked as set")
|
||||
}
|
||||
if *bool4Flag != false {
|
||||
t.Error("bool4 flag should be false, is ", *bool4Flag)
|
||||
}
|
||||
if *intFlag != 22 {
|
||||
t.Error("int flag should be 22, is ", *intFlag)
|
||||
}
|
||||
if *int64Flag != 0x23 {
|
||||
t.Error("int64 flag should be 0x23, is ", *int64Flag)
|
||||
}
|
||||
if *uintFlag != 24 {
|
||||
t.Error("uint flag should be 24, is ", *uintFlag)
|
||||
}
|
||||
if *uint64Flag != 25 {
|
||||
t.Error("uint64 flag should be 25, is ", *uint64Flag)
|
||||
}
|
||||
if *stringFlag != "hello" {
|
||||
t.Error("string flag should be `hello`, is ", *stringFlag)
|
||||
}
|
||||
if !f.IsSet("string") {
|
||||
t.Error("string flag should be marked as set")
|
||||
}
|
||||
if f.IsSet("string2") {
|
||||
t.Error("string2 flag should not be marked as set")
|
||||
}
|
||||
if *singleQuoteFlag != "single" {
|
||||
t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
|
||||
}
|
||||
if *doubleQuoteFlag != "double" {
|
||||
t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag)
|
||||
}
|
||||
if *mixedQuoteFlag != `'mixed"` {
|
||||
t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag)
|
||||
}
|
||||
if *mixed2QuoteFlag != `"mixed2'` {
|
||||
t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag)
|
||||
}
|
||||
if *nestedQuoteFlag != "'single nested'" {
|
||||
t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag)
|
||||
}
|
||||
if *nested2QuoteFlag != `"double nested"` {
|
||||
t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag)
|
||||
}
|
||||
if *float64Flag != 2718e28 {
|
||||
t.Error("float64 flag should be 2718e28, is ", *float64Flag)
|
||||
}
|
||||
if *durationFlag != 2*time.Minute {
|
||||
t.Error("duration flag should be 2m, is ", *durationFlag)
|
||||
}
|
||||
if len(f.Args()) != 1 {
|
||||
t.Error("expected one argument, got", len(f.Args()))
|
||||
} else if f.Args()[0] != extra {
|
||||
t.Errorf("expected argument %q got %q", extra, f.Args()[0])
|
||||
}
|
||||
}
|
||||
|
||||
func testPanic(f *FlagSet, t *testing.T) {
|
||||
f.Int([]string{"-int"}, 0, "int value")
|
||||
if f.Parsed() {
|
||||
t.Error("f.Parse() = true before Parse")
|
||||
}
|
||||
args := []string{
|
||||
"-int", "21",
|
||||
}
|
||||
f.Parse(args)
|
||||
}
|
||||
|
||||
func TestParsePanic(t *testing.T) {
|
||||
ResetForTesting(func() {})
|
||||
testPanic(CommandLine, t)
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
ResetForTesting(func() { t.Error("bad parse") })
|
||||
testParse(CommandLine, t)
|
||||
}
|
||||
|
||||
func TestFlagSetParse(t *testing.T) {
|
||||
testParse(NewFlagSet("test", ContinueOnError), t)
|
||||
}
|
||||
|
||||
// Declare a user-defined flag type.
|
||||
type flagVar []string
|
||||
|
||||
func (f *flagVar) String() string {
|
||||
return fmt.Sprint([]string(*f))
|
||||
}
|
||||
|
||||
func (f *flagVar) Set(value string) error {
|
||||
*f = append(*f, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestUserDefined(t *testing.T) {
|
||||
var flags FlagSet
|
||||
flags.Init("test", ContinueOnError)
|
||||
var v flagVar
|
||||
flags.Var(&v, []string{"v"}, "usage")
|
||||
if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(v) != 3 {
|
||||
t.Fatal("expected 3 args; got ", len(v))
|
||||
}
|
||||
expect := "[1 2 3]"
|
||||
if v.String() != expect {
|
||||
t.Errorf("expected value %q got %q", expect, v.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Declare a user-defined boolean flag type.
|
||||
type boolFlagVar struct {
|
||||
count int
|
||||
}
|
||||
|
||||
func (b *boolFlagVar) String() string {
|
||||
return fmt.Sprintf("%d", b.count)
|
||||
}
|
||||
|
||||
func (b *boolFlagVar) Set(value string) error {
|
||||
if value == "true" {
|
||||
b.count++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *boolFlagVar) IsBoolFlag() bool {
|
||||
return b.count < 4
|
||||
}
|
||||
|
||||
func TestUserDefinedBool(t *testing.T) {
|
||||
var flags FlagSet
|
||||
flags.Init("test", ContinueOnError)
|
||||
var b boolFlagVar
|
||||
var err error
|
||||
flags.Var(&b, []string{"b"}, "usage")
|
||||
if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil {
|
||||
if b.count < 4 {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
if b.count != 4 {
|
||||
t.Errorf("want: %d; got: %d", 4, b.count)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error; got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetOutput(t *testing.T) {
|
||||
var flags FlagSet
|
||||
var buf bytes.Buffer
|
||||
flags.SetOutput(&buf)
|
||||
flags.Init("test", ContinueOnError)
|
||||
flags.Parse([]string{"-unknown"})
|
||||
if out := buf.String(); !strings.Contains(out, "-unknown") {
|
||||
t.Logf("expected output mentioning unknown; got %q", out)
|
||||
}
|
||||
}
|
||||
|
||||
// This tests that one can reset the flags. This still works but not well, and is
|
||||
// superseded by FlagSet.
|
||||
func TestChangingArgs(t *testing.T) {
|
||||
ResetForTesting(func() { t.Fatal("bad parse") })
|
||||
oldArgs := os.Args
|
||||
defer func() { os.Args = oldArgs }()
|
||||
os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"}
|
||||
before := Bool([]string{"before"}, false, "")
|
||||
if err := CommandLine.Parse(os.Args[1:]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmd := Arg(0)
|
||||
os.Args = Args()
|
||||
after := Bool([]string{"after"}, false, "")
|
||||
Parse()
|
||||
args := Args()
|
||||
|
||||
if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
|
||||
t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that -help invokes the usage message and returns ErrHelp.
|
||||
func TestHelp(t *testing.T) {
|
||||
var helpCalled = false
|
||||
fs := NewFlagSet("help test", ContinueOnError)
|
||||
fs.Usage = func() { helpCalled = true }
|
||||
var flag bool
|
||||
fs.BoolVar(&flag, []string{"flag"}, false, "regular flag")
|
||||
// Regular flag invocation should work
|
||||
err := fs.Parse([]string{"-flag=true"})
|
||||
if err != nil {
|
||||
t.Fatal("expected no error; got ", err)
|
||||
}
|
||||
if !flag {
|
||||
t.Error("flag was not set by -flag")
|
||||
}
|
||||
if helpCalled {
|
||||
t.Error("help called for regular flag")
|
||||
helpCalled = false // reset for next test
|
||||
}
|
||||
// Help flag should work as expected.
|
||||
err = fs.Parse([]string{"-help"})
|
||||
if err == nil {
|
||||
t.Fatal("error expected")
|
||||
}
|
||||
if err != ErrHelp {
|
||||
t.Fatal("expected ErrHelp; got ", err)
|
||||
}
|
||||
if !helpCalled {
|
||||
t.Fatal("help was not called")
|
||||
}
|
||||
// If we define a help flag, that should override.
|
||||
var help bool
|
||||
fs.BoolVar(&help, []string{"help"}, false, "help flag")
|
||||
helpCalled = false
|
||||
err = fs.Parse([]string{"-help"})
|
||||
if err != nil {
|
||||
t.Fatal("expected no error for defined -help; got ", err)
|
||||
}
|
||||
if helpCalled {
|
||||
t.Fatal("help was called; should not have been for defined help flag")
|
||||
}
|
||||
}
|
||||
|
||||
// Test the flag count functions.
|
||||
func TestFlagCounts(t *testing.T) {
|
||||
fs := NewFlagSet("help test", ContinueOnError)
|
||||
var flag bool
|
||||
fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
|
||||
fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
|
||||
fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
|
||||
fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
|
||||
fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
|
||||
fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
|
||||
|
||||
if fs.FlagCount() != 6 {
|
||||
t.Fatal("FlagCount wrong. ", fs.FlagCount())
|
||||
}
|
||||
if fs.FlagCountUndeprecated() != 4 {
|
||||
t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
|
||||
}
|
||||
if fs.NFlag() != 0 {
|
||||
t.Fatal("NFlag wrong. ", fs.NFlag())
|
||||
}
|
||||
err := fs.Parse([]string{"-fd", "-g", "-flag4"})
|
||||
if err != nil {
|
||||
t.Fatal("expected no error for defined -help; got ", err)
|
||||
}
|
||||
if fs.NFlag() != 4 {
|
||||
t.Fatal("NFlag wrong. ", fs.NFlag())
|
||||
}
|
||||
}
|
||||
|
||||
// Exercises a bug in sortFlags
|
||||
func TestSortFlags(t *testing.T) {
|
||||
fs := NewFlagSet("help TestSortFlags", ContinueOnError)
|
||||
|
||||
var err error
|
||||
|
||||
var b bool
|
||||
fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
|
||||
|
||||
err = fs.Parse([]string{"--banana=true"})
|
||||
if err != nil {
|
||||
t.Fatal("expected no error; got ", err)
|
||||
}
|
||||
|
||||
count := 0
|
||||
|
||||
fs.VisitAll(func(flag *Flag) {
|
||||
count++
|
||||
if flag == nil {
|
||||
t.Fatal("VisitAll should not return a nil flag")
|
||||
}
|
||||
})
|
||||
flagcount := fs.FlagCount()
|
||||
if flagcount != count {
|
||||
t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
|
||||
}
|
||||
// Make sure it's idempotent
|
||||
if flagcount != fs.FlagCount() {
|
||||
t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
|
||||
}
|
||||
|
||||
count = 0
|
||||
fs.Visit(func(flag *Flag) {
|
||||
count++
|
||||
if flag == nil {
|
||||
t.Fatal("Visit should not return a nil flag")
|
||||
}
|
||||
})
|
||||
nflag := fs.NFlag()
|
||||
if nflag != count {
|
||||
t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
|
||||
}
|
||||
if nflag != fs.NFlag() {
|
||||
t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
|
||||
}
|
||||
}
|
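The tests above exercise a flag package in the style of Docker's mflag fork, where one flag can carry several names (a short form, a "-"-prefixed long form, and "#"-prefixed deprecated forms). A minimal usage sketch follows; the import path and package alias are assumptions based on the vendored layout in this diff, not something the commit itself states.

package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	// One flag, two names: a short form and a "-"-prefixed long form.
	verbose := fs.Bool([]string{"v", "-verbose"}, false, "enable verbose output")
	if err := fs.Parse([]string{"--verbose"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("verbose:", *verbose)
}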
187
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
// Package parsers provides helper functions to parse and validate different types
|
||||
// of strings, such as hosts, unix addresses, tcp addresses, filters, and kernel
|
||||
// operating system versions.
|
||||
package parsers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseHost parses the specified address and returns an address that will be used as the host.
|
||||
// Depending on the address specified, it will use defaultTCPAddr or defaultUnixAddr.
|
||||
// FIXME: Change this not to receive default value as parameter
|
||||
func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
|
||||
addr = strings.TrimSpace(addr)
|
||||
if addr == "" {
|
||||
if runtime.GOOS != "windows" {
|
||||
addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
|
||||
} else {
|
||||
// Note - defaultTCPAddr already includes tcp:// prefix
|
||||
addr = defaultTCPAddr
|
||||
}
|
||||
}
|
||||
addrParts := strings.Split(addr, "://")
|
||||
if len(addrParts) == 1 {
|
||||
addrParts = []string{"tcp", addrParts[0]}
|
||||
}
|
||||
|
||||
switch addrParts[0] {
|
||||
case "tcp":
|
||||
return ParseTCPAddr(addrParts[1], defaultTCPAddr)
|
||||
case "unix":
|
||||
return ParseUnixAddr(addrParts[1], defaultUnixAddr)
|
||||
case "fd":
|
||||
return addr, nil
|
||||
default:
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
}
|
||||
|
||||
// ParseUnixAddr parses and validates that the specified address is a valid UNIX
|
||||
// socket address. It returns a formatted UNIX socket address, either using the
|
||||
// address parsed from addr, or the contents of defaultAddr if addr is a blank
|
||||
// string.
|
||||
func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
|
||||
addr = strings.TrimPrefix(addr, "unix://")
|
||||
if strings.Contains(addr, "://") {
|
||||
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
|
||||
}
|
||||
if addr == "" {
|
||||
addr = defaultAddr
|
||||
}
|
||||
return fmt.Sprintf("unix://%s", addr), nil
|
||||
}
|
||||
|
||||
// ParseTCPAddr parses and validates that the specified address is a valid TCP
|
||||
// address. It returns a formatted TCP address, either using the address parsed
|
||||
// from addr, or the contents of defaultAddr if addr is a blank string.
|
||||
func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
|
||||
addr = strings.TrimPrefix(addr, "tcp://")
|
||||
if strings.Contains(addr, "://") || addr == "" {
|
||||
return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
|
||||
}
|
||||
|
||||
u, err := url.Parse("tcp://" + addr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hostParts := strings.Split(u.Host, ":")
|
||||
if len(hostParts) != 2 {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
host := hostParts[0]
|
||||
if host == "" {
|
||||
host = defaultAddr
|
||||
}
|
||||
|
||||
p, err := strconv.Atoi(hostParts[1])
|
||||
if err != nil && p == 0 {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
|
||||
}
|
||||
|
||||
// ParseRepositoryTag gets a repository name and returns the right repository name + tag|digest.
|
||||
// The tag can be ambiguous when the repository name contains a port.
|
||||
// Ex: localhost.localdomain:5000/samalba/hipache:latest
|
||||
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
|
||||
func ParseRepositoryTag(repos string) (string, string) {
|
||||
n := strings.Index(repos, "@")
|
||||
if n >= 0 {
|
||||
parts := strings.Split(repos, "@")
|
||||
return parts[0], parts[1]
|
||||
}
|
||||
n = strings.LastIndex(repos, ":")
|
||||
if n < 0 {
|
||||
return repos, ""
|
||||
}
|
||||
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
|
||||
return repos[:n], tag
|
||||
}
|
||||
return repos, ""
|
||||
}
|
||||
|
||||
// PartParser parses and validates the specified string (data) using the specified template
|
||||
// e.g. ip:public:private -> 192.168.0.1:80:8000
|
||||
func PartParser(template, data string) (map[string]string, error) {
|
||||
// ip:public:private
|
||||
var (
|
||||
templateParts = strings.Split(template, ":")
|
||||
parts = strings.Split(data, ":")
|
||||
out = make(map[string]string, len(templateParts))
|
||||
)
|
||||
if len(parts) != len(templateParts) {
|
||||
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
|
||||
}
|
||||
|
||||
for i, t := range templateParts {
|
||||
value := ""
|
||||
if len(parts) > i {
|
||||
value = parts[i]
|
||||
}
|
||||
out[t] = value
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
|
||||
func ParseKeyValueOpt(opt string) (string, string, error) {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
|
||||
}
|
||||
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
|
||||
}
|
||||
|
||||
// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
|
||||
func ParsePortRange(ports string) (uint64, uint64, error) {
|
||||
if ports == "" {
|
||||
return 0, 0, fmt.Errorf("Empty string specified for ports.")
|
||||
}
|
||||
if !strings.Contains(ports, "-") {
|
||||
start, err := strconv.ParseUint(ports, 10, 16)
|
||||
end := start
|
||||
return start, end, err
|
||||
}
|
||||
|
||||
parts := strings.Split(ports, "-")
|
||||
start, err := strconv.ParseUint(parts[0], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
end, err := strconv.ParseUint(parts[1], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if end < start {
|
||||
return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
|
||||
}
|
||||
return start, end, nil
|
||||
}
|
||||
|
||||
// ParseLink parses and validates the specified string as a link format (name:alias)
|
||||
func ParseLink(val string) (string, string, error) {
|
||||
if val == "" {
|
||||
return "", "", fmt.Errorf("empty string specified for links")
|
||||
}
|
||||
arr := strings.Split(val, ":")
|
||||
if len(arr) > 2 {
|
||||
return "", "", fmt.Errorf("bad format for links: %s", val)
|
||||
}
|
||||
if len(arr) == 1 {
|
||||
return val, val, nil
|
||||
}
|
||||
// This is kept because we can actually get a HostConfig with links
|
||||
// from an already created container and the format is not `foo:bar`
|
||||
// but `/foo:/c1/bar`
|
||||
if strings.HasPrefix(arr[0], "/") {
|
||||
_, alias := path.Split(arr[1])
|
||||
return arr[0][1:], alias, nil
|
||||
}
|
||||
return arr[0], arr[1], nil
|
||||
}
|
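A minimal sketch of how the helpers above are typically called. The import path follows the external/ vendoring layout used elsewhere in this diff and is an assumption; the expected outputs are taken from the doc comments and the tests below.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	// ":2375" carries no host, so the default TCP host is filled in.
	host, _ := parsers.ParseHost("127.0.0.1", "/var/run/docker.sock", ":2375")
	fmt.Println(host) // tcp://127.0.0.1:2375

	// A registry port is not mistaken for a tag separator.
	repo, tag := parsers.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache:latest")
	fmt.Println(repo, tag) // localhost.localdomain:5000/samalba/hipache latest

	start, end, _ := parsers.ParsePortRange("8000-9000")
	fmt.Println(start, end) // 8000 9000
}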
210
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go
generated
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
package parsers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseHost(t *testing.T) {
|
||||
var (
|
||||
defaultHTTPHost = "127.0.0.1"
|
||||
defaultUnix = "/var/run/docker.sock"
|
||||
)
|
||||
invalids := map[string]string{
|
||||
"0.0.0.0": "Invalid bind address format: 0.0.0.0",
|
||||
"tcp://": "Invalid proto, expected tcp: ",
|
||||
"tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
|
||||
"tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
|
||||
"udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1",
|
||||
"udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375",
|
||||
}
|
||||
valids := map[string]string{
|
||||
"0.0.0.1:5555": "tcp://0.0.0.1:5555",
|
||||
"0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path",
|
||||
":6666": "tcp://127.0.0.1:6666",
|
||||
":6666/path": "tcp://127.0.0.1:6666/path",
|
||||
"tcp://:7777": "tcp://127.0.0.1:7777",
|
||||
"tcp://:7777/path": "tcp://127.0.0.1:7777/path",
|
||||
"": "unix:///var/run/docker.sock",
|
||||
"unix:///run/docker.sock": "unix:///run/docker.sock",
|
||||
"unix://": "unix:///var/run/docker.sock",
|
||||
"fd://": "fd://",
|
||||
"fd://something": "fd://something",
|
||||
}
|
||||
for invalidAddr, expectedError := range invalids {
|
||||
if addr, err := ParseHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError {
|
||||
t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr)
|
||||
}
|
||||
}
|
||||
for validAddr, expectedAddr := range valids {
|
||||
if addr, err := ParseHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr {
|
||||
t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidUnixAddrInvalid(t *testing.T) {
|
||||
if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" {
|
||||
t.Fatalf("Expected an error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRepositoryTag(t *testing.T) {
|
||||
if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortMapping(t *testing.T) {
|
||||
if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil {
|
||||
t.Fatalf("Expected an error, got %v", err)
|
||||
}
|
||||
data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(data) != 3 {
|
||||
t.FailNow()
|
||||
}
|
||||
if data["ip"] != "192.168.1.1" {
|
||||
t.Fail()
|
||||
}
|
||||
if data["public"] != "80" {
|
||||
t.Fail()
|
||||
}
|
||||
if data["private"] != "8080" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseKeyValueOpt(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"": "Unable to parse key/value option: ",
|
||||
"key": "Unable to parse key/value option: key",
|
||||
}
|
||||
for invalid, expectedError := range invalids {
|
||||
if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err)
|
||||
}
|
||||
}
|
||||
valids := map[string][]string{
|
||||
"key=value": {"key", "value"},
|
||||
" key = value ": {"key", "value"},
|
||||
"key=value1=value2": {"key", "value1=value2"},
|
||||
" key = value1 = value2 ": {"key", "value1 = value2"},
|
||||
}
|
||||
for valid, expectedKeyValue := range valids {
|
||||
key, value, err := ParseKeyValueOpt(valid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != expectedKeyValue[0] || value != expectedKeyValue[1] {
|
||||
t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRange(t *testing.T) {
|
||||
if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 {
|
||||
t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeEmpty(t *testing.T) {
|
||||
if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." {
|
||||
t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeWithNoRange(t *testing.T) {
|
||||
start, end, err := ParsePortRange("8080")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if start != 8080 || end != 8080 {
|
||||
t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectEndRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectStartRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLink(t *testing.T) {
|
||||
name, alias, err := ParseLink("name:alias")
|
||||
if err != nil {
|
||||
t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err)
|
||||
}
|
||||
if name != "name" {
|
||||
t.Fatalf("Link name should have been name, got %s instead", name)
|
||||
}
|
||||
if alias != "alias" {
|
||||
t.Fatalf("Link alias should have been alias, got %s instead", alias)
|
||||
}
|
||||
// short format definition
|
||||
name, alias, err = ParseLink("name")
|
||||
if err != nil {
|
||||
t.Fatalf("Expected not to error out on a valid name only format but got: %v", err)
|
||||
}
|
||||
if name != "name" {
|
||||
t.Fatalf("Link name should have been name, got %s instead", name)
|
||||
}
|
||||
if alias != "name" {
|
||||
t.Fatalf("Link alias should have been name, got %s instead", alias)
|
||||
}
|
||||
// empty string link definition is not allowed
|
||||
if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") {
|
||||
t.Fatalf("Expected error 'empty string specified for links' but got: %v", err)
|
||||
}
|
||||
// more than two colons are not allowed
|
||||
if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") {
|
||||
t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
|
||||
}
|
||||
}
|
@@ -14,18 +14,19 @@ import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
|
||||
)
|
||||
|
||||
var (
|
||||
// Pool which returns bufio.Reader with a 32K buffer
|
||||
// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
|
||||
BufioReader32KPool *BufioReaderPool
|
||||
// Pool which returns bufio.Writer with a 32K buffer
|
||||
// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
|
||||
BufioWriter32KPool *BufioWriterPool
|
||||
)
|
||||
|
||||
const buffer32K = 32 * 1024
|
||||
|
||||
// BufioReaderPool is a bufio reader that uses sync.Pool.
|
||||
type BufioReaderPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
@@ -57,6 +58,14 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
|
||||
bufPool.pool.Put(b)
|
||||
}
|
||||
|
||||
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
|
||||
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
||||
buf := BufioReader32KPool.Get(src)
|
||||
written, err = io.Copy(dst, buf)
|
||||
BufioReader32KPool.Put(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
|
||||
// into the pool and closes the reader if it's an io.ReadCloser.
|
||||
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
|
||||
@@ -69,6 +78,7 @@ func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Rea
|
||||
})
|
||||
}
|
||||
|
||||
// BufioWriterPool is a bufio writer that uses sync.Pool.
|
||||
type BufioWriterPool struct {
|
||||
pool sync.Pool
|
||||
}
|
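The hunk above adds a Copy helper that reuses a pooled 32K bufio.Reader instead of allocating a new buffer for every io.Copy call. A minimal sketch of its use follows; the import path is again assumed from the vendored layout.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
)

func main() {
	// Copy drains src into dst through a pooled 32K buffered reader.
	var dst bytes.Buffer
	n, err := pools.Copy(&dst, strings.NewReader("hello"))
	fmt.Println(n, err, dst.String()) // 5 <nil> hello

	// The pools can also be used directly: Get wraps a reader, Put returns it.
	br := pools.BufioReader32KPool.Get(strings.NewReader("world"))
	defer pools.BufioReader32KPool.Put(br)
}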
162
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
package pools
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
|
||||
reader := BufioReader32KPool.Get(nil)
|
||||
if reader == nil {
|
||||
t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBufioReaderPoolPutAndGet(t *testing.T) {
|
||||
sr := bufio.NewReader(strings.NewReader("foobar"))
|
||||
reader := BufioReader32KPool.Get(sr)
|
||||
if reader == nil {
|
||||
t.Fatalf("BufioReaderPool should not return a nil reader.")
|
||||
}
|
||||
// verify the first 3 bytes
|
||||
buf1 := make([]byte, 3)
|
||||
_, err := reader.Read(buf1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if actual := string(buf1); actual != "foo" {
|
||||
t.Fatalf("The first letter should have been 'foo' but was %v", actual)
|
||||
}
|
||||
BufioReader32KPool.Put(reader)
|
||||
// Try to read the next 3 bytes
|
||||
_, err = sr.Read(make([]byte, 3))
|
||||
if err == nil || err != io.EOF {
|
||||
t.Fatalf("The buffer should have been empty, issue an EOF error.")
|
||||
}
|
||||
}
|
||||
|
||||
type simpleReaderCloser struct {
|
||||
io.Reader
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (r *simpleReaderCloser) Close() error {
|
||||
r.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
|
||||
br := bufio.NewReader(strings.NewReader(""))
|
||||
sr := &simpleReaderCloser{
|
||||
Reader: strings.NewReader("foobar"),
|
||||
closed: false,
|
||||
}
|
||||
reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
|
||||
if reader == nil {
|
||||
t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
|
||||
}
|
||||
// Verify the content of reader
|
||||
buf := make([]byte, 3)
|
||||
_, err := reader.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if actual := string(buf); actual != "foo" {
|
||||
t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual)
|
||||
}
|
||||
reader.Close()
|
||||
// Read 3 more bytes "bar"
|
||||
_, err = reader.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if actual := string(buf); actual != "bar" {
|
||||
t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual)
|
||||
}
|
||||
if !sr.closed {
|
||||
t.Fatalf("The ReaderCloser should have been closed, it is not.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
|
||||
writer := BufioWriter32KPool.Get(nil)
|
||||
if writer == nil {
|
||||
t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBufioWriterPoolPutAndGet(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
bw := bufio.NewWriter(buf)
|
||||
writer := BufioWriter32KPool.Get(bw)
|
||||
if writer == nil {
|
||||
t.Fatalf("BufioReaderPool should not return a nil writer.")
|
||||
}
|
||||
written, err := writer.Write([]byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if written != 6 {
|
||||
t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
|
||||
}
|
||||
// Make sure everything is flushed all the way through
|
||||
writer.Flush()
|
||||
bw.Flush()
|
||||
if len(buf.Bytes()) != 6 {
|
||||
t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
|
||||
}
|
||||
// Reset the buffer
|
||||
buf.Reset()
|
||||
BufioWriter32KPool.Put(writer)
|
||||
// Try to write something
|
||||
written, err = writer.Write([]byte("barfoo"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// If we now try to flush it, it should panic (the writer is nil)
|
||||
// recover it
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatal("Trying to flush the writter should have 'paniced', did not.")
|
||||
}
|
||||
}()
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
type simpleWriterCloser struct {
|
||||
io.Writer
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (r *simpleWriterCloser) Close() error {
|
||||
r.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
bw := bufio.NewWriter(buf)
|
||||
sw := &simpleWriterCloser{
|
||||
Writer: new(bytes.Buffer),
|
||||
closed: false,
|
||||
}
|
||||
bw.Flush()
|
||||
writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
|
||||
if writer == nil {
|
||||
t.Fatalf("BufioReaderPool should not return a nil writer.")
|
||||
}
|
||||
written, err := writer.Write([]byte("foobar"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if written != 6 {
|
||||
t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
|
||||
}
|
||||
writer.Close()
|
||||
if !sw.closed {
|
||||
t.Fatalf("The ReaderCloser should have been closed, it is not.")
|
||||
}
|
||||
}
|
@@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -31,7 +31,7 @@ type StdWriter struct {
|
||||
func (w *StdWriter) Write(buf []byte) (n int, err error) {
|
||||
var n1, n2 int
|
||||
if w == nil || w.Writer == nil {
|
||||
return 0, errors.New("Writer not instanciated")
|
||||
return 0, errors.New("Writer not instantiated")
|
||||
}
|
||||
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
|
||||
n1, err = w.Writer.Write(w.prefix[:])
|
||||
@@ -47,7 +47,7 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewStdWriter instanciates a new Writer.
|
||||
// NewStdWriter instantiates a new Writer.
|
||||
// Everything written to it will be encapsulated using a custom format,
|
||||
// and written to the underlying `w` stream.
|
||||
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
|
Some files were not shown because too many files have changed in this diff