Merge pull request #8491 from vmarmol/fix-isolation

Fix setting resource isolation in Docker 1.6+
Dawn Chen
2015-05-19 11:26:27 -07:00
119 changed files with 13086 additions and 458 deletions
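For context before the file-by-file diff (this sketch is not part of the commit): Docker 1.6+ reads resource limits from the `HostConfig` passed at start time rather than from the create-time `Config`, which is why this change adds `Memory`, `MemorySwap`, `CPUShares` and `CPUSet` to `HostConfig` in the vendored go-dockerclient. A minimal usage sketch, assuming a local Unix-socket daemon and a placeholder image:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name:   "isolated",                       // placeholder name
		Config: &docker.Config{Image: "busybox"}, // placeholder image
	})
	if err != nil {
		log.Fatal(err)
	}
	// With Docker 1.6+, isolation settings belong in the HostConfig
	// supplied to StartContainer, via the fields added by this change.
	err = client.StartContainer(container.ID, &docker.HostConfig{
		Memory:    256 * 1024 * 1024, // bytes
		CPUShares: 512,
		CPUSet:    "0-1",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```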

Godeps/Godeps.json (generated)
View File

@@ -173,7 +173,8 @@
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "09334c56c63bab2cd6c4ccab924d89e2419a361f"
"Comment": "0.2.1-532-g2f1ad24",
"Rev": "2f1ad24900b2777139b5becee93eb63a75b00617"
},
{
"ImportPath": "github.com/garyburd/redigo/internal",

View File

@@ -6,7 +6,5 @@ go:
env:
- GOARCH=amd64
- GOARCH=386
install:
- make testdeps
script:
- make test

View File

@@ -21,6 +21,7 @@ David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com>
Ed <edrocksit@gmail.com>
Eric Anderson <anderson@copperegg.com>
Ewout Prangsma <ewout@prangsma.net>
Fabio Rehm <fgrehm@gmail.com>
Fatih Arslan <ftharsln@gmail.com>
Flavia Missi <flaviamissi@gmail.com>
@@ -37,6 +38,7 @@ Johan Euphrosine <proppy@google.com>
Kamil Domanski <kamil@domanski.co>
Karan Misra <kidoman@gmail.com>
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
Kyle Allan <kallan357@gmail.com>
liron-l <levinlir@gmail.com>
Lucas Clemente <lucas@clemente.io>
Lucas Weiblen <lucasweiblen@gmail.com>
@@ -67,6 +69,7 @@ Summer Mousa <smousa@zenoss.com>
Tarsis Azevedo <tarsis@corp.globo.com>
Tim Schindler <tim@catalyst-zero.com>
Tobi Knaup <tobi@mesosphere.io>
Victor Marmol <vmarmol@google.com>
Vincenzo Prignano <vincenzo.prignano@gmail.com>
Wiliam Souza <wiliamsouza83@gmail.com>
Ye Yin <eyniy@qq.com>

View File

@@ -2,5 +2,5 @@
Version 2.0, January 2004
http://www.apache.org/licenses/
You can find the Docker license int the following link:
https://raw2.github.com/dotcloud/docker/master/LICENSE
You can find the Docker license at the following link:
https://raw.githubusercontent.com/docker/docker/master/LICENSE

View File

@@ -1,35 +1,52 @@
.PHONY: \
all \
deps \
updatedeps \
testdeps \
updatetestdeps \
cov \
vendor \
lint \
vet \
fmt \
fmtcheck \
pretest \
test \
cov \
clean
all: test
deps:
go get -d -v ./...
vendor:
go get -v github.com/mjibson/party
party -d vendor -c -u
updatedeps:
go get -d -v -u -f ./...
lint:
go get -v github.com/golang/lint/golint
for file in $(shell git ls-files '*.go' | grep -v '^vendor/'); do \
golint $$file; \
done
testdeps:
go get -d -v -t ./...
vet:
go get -v golang.org/x/tools/cmd/vet
go vet ./...
updatetestdeps:
go get -d -v -t -u -f ./...
fmt:
gofmt -w $(shell git ls-files '*.go' | grep -v '^vendor/')
cov: testdeps
fmtcheck:
for file in $(shell git ls-files '*.go' | grep -v '^vendor/'); do \
gofmt $$file | diff -u $$file -; \
if [ -n "$$(gofmt $$file | diff -u $$file -)" ]; then\
exit 1; \
fi; \
done
pretest: lint vet fmtcheck
test: pretest
go test
go test ./testing
cov:
go get -v github.com/axw/gocov/gocov
go get golang.org/x/tools/cmd/cover
gocov test | gocov report
test: testdeps
go test ./...
./testing/bin/fmtpolice
clean:
go clean ./...

View File

@@ -1,9 +1,8 @@
#go-dockerclient
# go-dockerclient
[![Build Status](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
[![Build Status](https://travis-ci.org/fsouza/go-dockerclient.png)](https://travis-ci.org/fsouza/go-dockerclient)
[![GoDoc](https://godoc.org/github.com/fsouza/go-dockerclient?status.png)](https://godoc.org/github.com/fsouza/go-dockerclient)
[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/API/).
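(Aside, not part of the diff.) A minimal sketch of basic use of the client described above, assuming a daemon at the default local Unix socket:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// List every container known to the daemon, running or not.
	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image)
	}
}
```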
@@ -64,6 +63,13 @@ func main() {
## Developing
You can run the tests with:
All development commands can be seen in the [Makefile](Makefile).
make test
Committed code must pass:
* [golint](https://github.com/golang/lint)
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
* [gofmt](https://golang.org/cmd/gofmt)
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.

View File

@@ -9,7 +9,7 @@ import (
"reflect"
"testing"
"github.com/docker/docker/pkg/archive"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
)
func TestBuildImageMultipleContextsError(t *testing.T) {

View File

@@ -23,6 +23,8 @@ import (
"reflect"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/stdcopy"
)
const userAgent = "go-dockerclient"
@@ -438,7 +440,7 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
if streamOptions.setRawTerminal {
_, err = io.Copy(streamOptions.stdout, resp.Body)
} else {
_, err = stdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
}
return err
}
@@ -509,31 +511,37 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error
}
rwc, br := clientconn.Hijack()
defer rwc.Close()
errs := make(chan error, 2)
errChanOut := make(chan error, 1)
errChanIn := make(chan error, 1)
exit := make(chan bool)
go func() {
defer close(exit)
defer close(errChanOut)
var err error
if hijackOptions.setRawTerminal {
// When TTY is ON, use regular copy
_, err = io.Copy(hijackOptions.stdout, br)
} else {
_, err = stdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
}
errs <- err
errChanOut <- err
}()
go func() {
var err error
if hijackOptions.in != nil {
_, err = io.Copy(rwc, hijackOptions.in)
_, err := io.Copy(rwc, hijackOptions.in)
errChanIn <- err
}
rwc.(interface {
CloseWrite() error
}).CloseWrite()
errs <- err
}()
<-exit
return <-errs
select {
case err = <-errChanIn:
return err
case err = <-errChanOut:
return err
}
}
func (c *Client) getURL(path string) string {

View File

@@ -192,6 +192,7 @@ type Config struct {
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty"`
NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
@@ -425,6 +426,10 @@ type HostConfig struct {
ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
}
// StartContainer starts a container, returning an error in case of failure.
@@ -495,7 +500,7 @@ func (c *Client) PauseContainer(id string) error {
return nil
}
// UnpauseContainer pauses the given container.
// UnpauseContainer unpauses the given container.
//
// See http://goo.gl/eBrNSL for more details.
func (c *Client) UnpauseContainer(id string) error {
@@ -543,6 +548,154 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
return result, nil
}
// Stats represents container statistics, returned by /containers/<id>/stats.
//
// See http://goo.gl/DFMiYD for more details.
type Stats struct {
Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
Network struct {
RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
} `json:"network,omitempty" yaml:"network,omitempty"`
MemoryStats struct {
Stats struct {
TotalPgmajfault uint64 `json:"total_pgmajfault,omitempty" yaml:"total_pgmajfault,omitempty"`
Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"`
MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"`
TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"`
Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"`
Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"`
TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"`
Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"`
Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"`
Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"`
TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"`
Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"`
TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"`
TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"`
TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"`
TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"`
RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"`
HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"`
TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"`
TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"`
ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"`
TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"`
TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"`
TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"`
InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"`
ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"`
Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
} `json:"stats,omitempty" yaml:"stats,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`
Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"`
Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"`
} `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"`
BlkioStats struct {
IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"`
IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"`
IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"`
IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"`
IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"`
IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"`
IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"`
SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"`
} `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"`
CPUStats struct {
CPUUsage struct {
PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
} `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
ThrottlingData struct {
Periods uint64 `json:"periods,omitempty"`
ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
ThrottledTime uint64 `json:"throttled_time,omitempty"`
} `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
} `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
}
// BlkioStatsEntry is a stats entry for blkio_stats
type BlkioStatsEntry struct {
Major uint64 `json:"major,omitempty" yaml:"major,omitempty"`
Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"`
Op string `json:"op,omitempty" yaml:"op,omitempty"`
Value uint64 `json:"value,omitempty" yaml:"value,omitempty"`
}
// StatsOptions specify parameters to the Stats function.
//
// See http://goo.gl/DFMiYD for more details.
type StatsOptions struct {
ID string
Stats chan<- *Stats
}
// Stats sends container statistics for the given container to the given channel.
//
// This function is blocking, similar to a streaming call for logs, and should be run
// on a separate goroutine from the caller. Note that this function will block until
// the given container is removed, not just exited. When finished, this function
// will close the given channel.
//
// See http://goo.gl/DFMiYD for more details.
func (c *Client) Stats(opts StatsOptions) (retErr error) {
errC := make(chan error, 1)
readCloser, writeCloser := io.Pipe()
defer func() {
close(opts.Stats)
if err := <-errC; err != nil && retErr == nil {
retErr = err
}
if err := readCloser.Close(); err != nil && retErr == nil {
retErr = err
}
}()
go func() {
err := c.stream("GET", fmt.Sprintf("/containers/%s/stats", opts.ID), streamOptions{
rawJSONStream: true,
stdout: writeCloser,
})
if err != nil {
dockerError, ok := err.(*Error)
if ok {
if dockerError.Status == http.StatusNotFound {
err = &NoSuchContainer{ID: opts.ID}
}
}
}
if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
err = closeErr
}
errC <- err
close(errC)
}()
decoder := json.NewDecoder(readCloser)
stats := new(Stats)
for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
if err != nil {
return err
}
opts.Stats <- stats
stats = new(Stats)
}
return nil
}
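A usage sketch for the blocking semantics described in the doc comment above (not part of the diff; the container ID is a placeholder): run `Stats` on its own goroutine and drain the channel, which `Stats` closes when it returns.

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	statsC := make(chan *docker.Stats)
	errC := make(chan error, 1)
	// Stats blocks until the container is removed, so run it concurrently;
	// it closes statsC when it finishes.
	go func() {
		errC <- client.Stats(docker.StatsOptions{ID: "abc123", Stats: statsC})
	}()
	for s := range statsC {
		fmt.Println(s.Read, s.MemoryStats.Usage)
	}
	if err := <-errC; err != nil {
		log.Fatal(err)
	}
}
```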
// KillContainerOptions represents the set of options that can be used in a
// call to KillContainer.
//
@@ -628,8 +781,8 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
if err != nil {
return err
}
io.Copy(opts.OutputStream, bytes.NewBuffer(body))
return nil
_, err = io.Copy(opts.OutputStream, bytes.NewBuffer(body))
return err
}
// WaitContainer blocks until the given container stops, returning the exit code

View File

@@ -116,7 +116,9 @@ func TestListContainersParams(t *testing.T) {
client := newTestClient(fakeRT)
u, _ := url.Parse(client.getURL("/containers/json"))
for _, tt := range tests {
client.ListContainers(tt.input)
if _, err := client.ListContainers(tt.input); err != nil {
t.Error(err)
}
got := map[string][]string(fakeRT.requests[0].URL.Query())
if !reflect.DeepEqual(got, tt.params) {
t.Errorf("Expected %#v, got %#v.", tt.params, got)
@@ -230,7 +232,9 @@ func TestInspectContainer(t *testing.T) {
},
"Links": null,
"PublishAllPorts": false,
"CgroupParent": "/mesos"
"CgroupParent": "/mesos",
"Memory": 17179869184,
"MemorySwap": 34359738368
}
}`
var expected Container
@@ -535,7 +539,7 @@ func TestStartContainerNilHostConfig(t *testing.T) {
var buf [4]byte
req.Body.Read(buf[:])
if string(buf[:]) != "null" {
t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", buf[:])
t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:])
}
}
@@ -853,11 +857,13 @@ func TestCommitContainerParams(t *testing.T) {
json,
},
}
fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK}
fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK}
client := newTestClient(fakeRT)
u, _ := url.Parse(client.getURL("/commit"))
for _, tt := range tests {
client.CommitContainer(tt.input)
if _, err := client.CommitContainer(tt.input); err != nil {
t.Error(err)
}
got := map[string][]string(fakeRT.requests[0].URL.Query())
if !reflect.DeepEqual(got, tt.params) {
t.Errorf("Expected %#v, got %#v.", tt.params, got)
@@ -983,11 +989,9 @@ func TestAttachToContainer(t *testing.T) {
func TestAttachToContainerSentinel(t *testing.T) {
var reader = strings.NewReader("send value")
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
w.Write([]byte("hello"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1006,17 +1010,19 @@ func TestAttachToContainerSentinel(t *testing.T) {
RawTerminal: true,
Success: success,
}
go client.AttachToContainer(opts)
go func() {
if err := client.AttachToContainer(opts); err != nil {
t.Error(err)
}
}()
success <- <-success
}
func TestAttachToContainerNilStdout(t *testing.T) {
var reader = strings.NewReader("send value")
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
w.Write([]byte("hello"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1041,11 +1047,9 @@ func TestAttachToContainerNilStdout(t *testing.T) {
func TestAttachToContainerNilStderr(t *testing.T) {
var reader = strings.NewReader("send value")
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
w.Write([]byte("hello"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1071,10 +1075,21 @@ func TestAttachToContainerRawTerminalFalse(t *testing.T) {
input := strings.NewReader("send value")
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
prefix := []byte{1, 0, 0, 0, 0, 0, 0, 5}
w.Write(prefix)
w.Write([]byte("hello"))
req = *r
w.WriteHeader(http.StatusOK)
hj, ok := w.(http.Hijacker)
if !ok {
t.Fatal("cannot hijack server connection")
}
conn, _, err := hj.Hijack()
if err != nil {
t.Fatal(err)
}
conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
conn.Write([]byte("hello"))
conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6})
conn.Write([]byte("hello!"))
conn.Close()
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1105,10 +1120,11 @@ func TestAttachToContainerRawTerminalFalse(t *testing.T) {
if !reflect.DeepEqual(got, expected) {
t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got)
}
t.Log(stderr.String())
t.Log(stdout.String())
if stdout.String() != "hello" {
t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stderr.String())
t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String())
}
if stderr.String() != "hello!" {
t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. Got %q.", "hello!", stderr.String())
}
}
@@ -1170,12 +1186,10 @@ func TestLogs(t *testing.T) {
}
func TestLogsNilStdoutDoesntFail(t *testing.T) {
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19}
w.Write(prefix)
w.Write([]byte("something happened!"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1194,12 +1208,10 @@ func TestLogsNilStdoutDoesntFail(t *testing.T) {
}
func TestLogsNilStderrDoesntFail(t *testing.T) {
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19}
w.Write(prefix)
w.Write([]byte("something happened!"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1267,10 +1279,8 @@ func TestLogsSpecifyingTail(t *testing.T) {
}
func TestLogsRawTerminal(t *testing.T) {
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("something happened!"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -1530,7 +1540,7 @@ func TestTopContainer(t *testing.T) {
}
if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 ||
processes.Processes[0][7] != "cmd1" {
t.Errorf("TopContainer: Process list to include cmd1. Got %#v.", expected, processes)
t.Errorf("TopContainer: Process list to include cmd1. Got %#v.", processes)
}
expectedURI := "/containers/" + id + "/top"
if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) {
@@ -1550,13 +1560,285 @@ func TestTopContainerNotFound(t *testing.T) {
func TestTopContainerWithPsArgs(t *testing.T) {
fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound}
client := newTestClient(fakeRT)
client.TopContainer("abef348", "aux")
expectedErr := &NoSuchContainer{ID: "abef348"}
if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) {
t.Errorf("TopContainer: Expected %v. Got %v.", expectedErr, err)
}
expectedURI := "/containers/abef348/top?ps_args=aux"
if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) {
t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String())
}
}
func TestStats(t *testing.T) {
jsonStats1 := `{
"read" : "2015-01-08T22:57:31.547920715Z",
"network" : {
"rx_dropped" : 0,
"rx_bytes" : 648,
"rx_errors" : 0,
"tx_packets" : 8,
"tx_dropped" : 0,
"rx_packets" : 8,
"tx_errors" : 0,
"tx_bytes" : 648
},
"memory_stats" : {
"stats" : {
"total_pgmajfault" : 0,
"cache" : 0,
"mapped_file" : 0,
"total_inactive_file" : 0,
"pgpgout" : 414,
"rss" : 6537216,
"total_mapped_file" : 0,
"writeback" : 0,
"unevictable" : 0,
"pgpgin" : 477,
"total_unevictable" : 0,
"pgmajfault" : 0,
"total_rss" : 6537216,
"total_rss_huge" : 6291456,
"total_writeback" : 0,
"total_inactive_anon" : 0,
"rss_huge" : 6291456,
"hierarchical_memory_limit": 189204833,
"total_pgfault" : 964,
"total_active_file" : 0,
"active_anon" : 6537216,
"total_active_anon" : 6537216,
"total_pgpgout" : 414,
"total_cache" : 0,
"inactive_anon" : 0,
"active_file" : 0,
"pgfault" : 964,
"inactive_file" : 0,
"total_pgpgin" : 477
},
"max_usage" : 6651904,
"usage" : 6537216,
"failcnt" : 0,
"limit" : 67108864
},
"blkio_stats": {
"io_service_bytes_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 428795731968
},
{
"major": 8,
"minor": 0,
"op": "Write",
"value": 388177920
}
],
"io_serviced_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 25994442
},
{
"major": 8,
"minor": 0,
"op": "Write",
"value": 1734
}
],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": []
},
"cpu_stats" : {
"cpu_usage" : {
"percpu_usage" : [
16970827,
1839451,
7107380,
10571290
],
"usage_in_usermode" : 10000000,
"total_usage" : 36488948,
"usage_in_kernelmode" : 20000000
},
"system_cpu_usage" : 20091722000000000
}
}`
// 1 second later, cache is 100
jsonStats2 := `{
"read" : "2015-01-08T22:57:32.547920715Z",
"network" : {
"rx_dropped" : 0,
"rx_bytes" : 648,
"rx_errors" : 0,
"tx_packets" : 8,
"tx_dropped" : 0,
"rx_packets" : 8,
"tx_errors" : 0,
"tx_bytes" : 648
},
"memory_stats" : {
"stats" : {
"total_pgmajfault" : 0,
"cache" : 100,
"mapped_file" : 0,
"total_inactive_file" : 0,
"pgpgout" : 414,
"rss" : 6537216,
"total_mapped_file" : 0,
"writeback" : 0,
"unevictable" : 0,
"pgpgin" : 477,
"total_unevictable" : 0,
"pgmajfault" : 0,
"total_rss" : 6537216,
"total_rss_huge" : 6291456,
"total_writeback" : 0,
"total_inactive_anon" : 0,
"rss_huge" : 6291456,
"total_pgfault" : 964,
"total_active_file" : 0,
"active_anon" : 6537216,
"total_active_anon" : 6537216,
"total_pgpgout" : 414,
"total_cache" : 0,
"inactive_anon" : 0,
"active_file" : 0,
"pgfault" : 964,
"inactive_file" : 0,
"total_pgpgin" : 477
},
"max_usage" : 6651904,
"usage" : 6537216,
"failcnt" : 0,
"limit" : 67108864
},
"blkio_stats": {
"io_service_bytes_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 428795731968
},
{
"major": 8,
"minor": 0,
"op": "Write",
"value": 388177920
}
],
"io_serviced_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 25994442
},
{
"major": 8,
"minor": 0,
"op": "Write",
"value": 1734
}
],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": []
},
"cpu_stats" : {
"cpu_usage" : {
"percpu_usage" : [
16970827,
1839451,
7107380,
10571290
],
"usage_in_usermode" : 10000000,
"total_usage" : 36488948,
"usage_in_kernelmode" : 20000000
},
"system_cpu_usage" : 20091722000000000
}
}`
var expected1 Stats
var expected2 Stats
err := json.Unmarshal([]byte(jsonStats1), &expected1)
if err != nil {
t.Fatal(err)
}
err = json.Unmarshal([]byte(jsonStats2), &expected2)
if err != nil {
t.Fatal(err)
}
id := "4fa6e0f0"
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(jsonStats1))
w.Write([]byte(jsonStats2))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
client.SkipServerVersionCheck = true
errC := make(chan error, 1)
statsC := make(chan *Stats)
go func() {
errC <- client.Stats(StatsOptions{id, statsC})
close(errC)
}()
var resultStats []*Stats
for {
stats, ok := <-statsC
if !ok {
break
}
resultStats = append(resultStats, stats)
}
err = <-errC
if err != nil {
t.Fatal(err)
}
if len(resultStats) != 2 {
t.Fatalf("Stats: Expected 2 results. Got %d.", len(resultStats))
}
if !reflect.DeepEqual(resultStats[0], &expected1) {
t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0])
}
if !reflect.DeepEqual(resultStats[1], &expected2) {
t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1])
}
if req.Method != "GET" {
t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method)
}
u, _ := url.Parse(client.getURL("/containers/" + id + "/stats"))
if req.URL.Path != u.Path {
t.Errorf("Stats: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path)
}
}
func TestStatsContainerNotFound(t *testing.T) {
client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
statsC := make(chan *Stats)
err := client.Stats(StatsOptions{"abef348", statsC})
expected := &NoSuchContainer{ID: "abef348"}
if !reflect.DeepEqual(err, expected) {
t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err)
}
}
func TestRenameContainer(t *testing.T) {
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
client := newTestClient(fakeRT)

View File

@@ -251,7 +251,9 @@ func TestGetList(t *testing.T) {
func TestSetList(t *testing.T) {
list := []string{"a", "b", "c"}
var env Env
env.SetList("SOME", list)
if err := env.SetList("SOME", list); err != nil {
t.Error(err)
}
if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) {
t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got)
}

View File

@@ -57,7 +57,6 @@ func testEventListeners(testName string, t *testing.T, buildServer func(http.Han
{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
`
var req http.Request
server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
rsc := bufio.NewScanner(strings.NewReader(response))
for rsc.Scan() {
@@ -65,7 +64,6 @@ func testEventListeners(testName string, t *testing.T, buildServer func(http.Han
w.(http.Flusher).Flush()
time.Sleep(10 * time.Millisecond)
}
req = *r
}))
defer server.Close()
@@ -76,7 +74,12 @@ func testEventListeners(testName string, t *testing.T, buildServer func(http.Han
client.SkipServerVersionCheck = true
listener := make(chan *APIEvents, 10)
defer func() { time.Sleep(10 * time.Millisecond); client.RemoveEventListener(listener) }()
defer func() {
time.Sleep(10 * time.Millisecond)
if err := client.RemoveEventListener(listener); err != nil {
t.Error(err)
}
}()
err = client.AddEventListener(listener)
if err != nil {
@@ -89,7 +92,7 @@ func testEventListeners(testName string, t *testing.T, buildServer func(http.Han
for {
select {
case msg := <-listener:
t.Logf("Received: %s", *msg)
t.Logf("Received: %v", *msg)
count++
err = checkEvent(count, msg)
if err != nil {

View File

@@ -87,11 +87,9 @@ func TestExecStartDetached(t *testing.T) {
func TestExecStartAndAttach(t *testing.T) {
var reader = strings.NewReader("send value")
var req http.Request
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
w.Write([]byte("hello"))
req = *r
}))
defer server.Close()
client, _ := NewClient(server.URL)
@@ -106,7 +104,11 @@ func TestExecStartAndAttach(t *testing.T) {
RawTerminal: true,
Success: success,
}
go client.StartExec(execID, opts)
go func() {
if err := client.StartExec(execID, opts); err != nil {
t.Error(err)
}
}()
<-success
}

View File

@@ -104,7 +104,6 @@ var (
//
// See http://goo.gl/HRVN1Z for more details.
func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
// TODO(pedge): what happens if we specify the digest parameter when using API Version <1.18?
path := "/images/json?" + queryString(opts)
body, _, err := c.do("GET", path, doOptions{})
if err != nil {
@@ -240,13 +239,17 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error
if opts.Name == "" {
return ErrNoSuchImage
}
headers, err := headersWithAuth(auth)
if err != nil {
return err
}
name := opts.Name
opts.Name = ""
path := "/images/" + name + "/push?" + queryString(&opts)
return c.stream("POST", path, streamOptions{
setRawTerminal: true,
rawJSONStream: opts.RawJSONStream,
headers: headersWithAuth(auth),
headers: headers,
stdout: opts.OutputStream,
})
}
@@ -271,7 +274,10 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error
return ErrNoSuchImage
}
headers := headersWithAuth(auth)
headers, err := headersWithAuth(auth)
if err != nil {
return err
}
return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream)
}
@@ -387,8 +393,13 @@ type BuildImageOptions struct {
Dockerfile string `qs:"dockerfile"`
NoCache bool `qs:"nocache"`
SuppressOutput bool `qs:"q"`
Pull bool `qs:"pull"`
RmTmpContainer bool `qs:"rm"`
ForceRmTmpContainer bool `qs:"forcerm"`
Memory int64 `qs:"memory"`
Memswap int64 `qs:"memswap"`
CPUShares int64 `qs:"cpushares"`
CPUSetCPUs string `qs:"cpusetcpus"`
InputStream io.Reader `qs:"-"`
OutputStream io.Writer `qs:"-"`
RawJSONStream bool `qs:"-"`
@@ -401,12 +412,15 @@ type BuildImageOptions struct {
// BuildImage builds an image from a tarball's url or a Dockerfile in the input
// stream.
//
// See http://goo.gl/wRsW76 for more details.
// See http://goo.gl/7nuGXa for more details.
func (c *Client) BuildImage(opts BuildImageOptions) error {
if opts.OutputStream == nil {
return ErrMissingOutputStream
}
var headers = headersWithAuth(opts.Auth, opts.AuthConfigs)
headers, err := headersWithAuth(opts.Auth, opts.AuthConfigs)
if err != nil {
return err
}
if opts.Remote != "" && opts.Name == "" {
opts.Name = opts.Remote
@@ -421,7 +435,7 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
return ErrMultipleContexts
}
var err error
if opts.InputStream, err = createTarStream(opts.ContextDir); err != nil {
if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
return err
}
}
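A sketch exercising the new build options added above (not part of the diff; the name, context directory, and limit values are placeholders):

```go
package main

import (
	"log"
	"os"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	err = client.BuildImage(docker.BuildImageOptions{
		Name:         "example/app",     // placeholder tag
		ContextDir:   ".",               // build context containing a Dockerfile
		Dockerfile:   "Dockerfile",
		Pull:         true,              // new: attempt to pull base images
		Memory:       512 * 1024 * 1024, // new: build-time memory limit, bytes
		CPUShares:    10,                // new: relative CPU weight
		CPUSetCPUs:   "0-3",             // new: CPUs the build may use
		OutputStream: os.Stdout,         // required by BuildImage
	})
	if err != nil {
		log.Fatal(err)
	}
}
```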
@@ -469,23 +483,27 @@ func isURL(u string) bool {
return p.Scheme == "http" || p.Scheme == "https"
}
func headersWithAuth(auths ...interface{}) map[string]string {
func headersWithAuth(auths ...interface{}) (map[string]string, error) {
var headers = make(map[string]string)
for _, auth := range auths {
switch auth.(type) {
case AuthConfiguration:
var buf bytes.Buffer
json.NewEncoder(&buf).Encode(auth)
if err := json.NewEncoder(&buf).Encode(auth); err != nil {
return nil, err
}
headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
case AuthConfigurations:
var buf bytes.Buffer
json.NewEncoder(&buf).Encode(auth)
if err := json.NewEncoder(&buf).Encode(auth); err != nil {
return nil, err
}
headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
}
}
return headers
return headers, nil
}
// APIImageSearch reflects the result of a search on Docker Hub

View File

@@ -240,7 +240,9 @@ func TestInspectImage(t *testing.T) {
"container_config":{"Memory":0}
}`
var expected Image
json.Unmarshal([]byte(body), &expected)
if err := json.Unmarshal([]byte(body), &expected); err != nil {
t.Fatal(err)
}
fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK}
client := newTestClient(fakeRT)
image, err := client.InspectImage(expected.ID)
@@ -655,8 +657,13 @@ func TestBuildImageParameters(t *testing.T) {
Name: "testImage",
NoCache: true,
SuppressOutput: true,
Pull: true,
RmTmpContainer: true,
ForceRmTmpContainer: true,
Memory: 1024,
Memswap: 2048,
CPUShares: 10,
CPUSetCPUs: "0-3",
InputStream: &buf,
OutputStream: &buf,
}
@@ -665,7 +672,18 @@ func TestBuildImageParameters(t *testing.T) {
t.Fatal(err)
}
req := fakeRT.requests[0]
expected := map[string][]string{"t": {opts.Name}, "nocache": {"1"}, "q": {"1"}, "rm": {"1"}, "forcerm": {"1"}}
expected := map[string][]string{
"t": {opts.Name},
"nocache": {"1"},
"q": {"1"},
"pull": {"1"},
"rm": {"1"},
"forcerm": {"1"},
"memory": {"1024"},
"memswap": {"2048"},
"cpushares": {"10"},
"cpusetcpus": {"0-3"},
}
got := map[string][]string(req.URL.Query())
if !reflect.DeepEqual(got, expected) {
t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)

View File

@@ -1,91 +0,0 @@
// Copyright 2014 Docker authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the DOCKER-LICENSE file.
package docker
import (
"encoding/binary"
"errors"
"io"
)
const (
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
)
var errInvalidStdHeader = errors.New("Unrecognized input header")
func stdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, 32*1024+stdWriterPrefixLen+1)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
if er == io.EOF {
if nr < stdWriterPrefixLen && nr2 < stdWriterPrefixLen {
return written, nil
}
nr += nr2
break
} else if er != nil {
return 0, er
}
nr += nr2
}
switch buf[stdWriterFdIndex] {
case 0:
fallthrough
case 1:
out = dstout
case 2:
out = dsterr
default:
return 0, errInvalidStdHeader
}
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
if frameSize+stdWriterPrefixLen > bufLen {
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-len(buf)+1)...)
bufLen = len(buf)
}
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
if er == io.EOF {
if nr == 0 {
return written, nil
}
nr += nr2
break
} else if er != nil {
return 0, er
}
nr += nr2
}
bound := frameSize + stdWriterPrefixLen
if bound > nr {
bound = nr
}
nw, ew = out.Write(buf[stdWriterPrefixLen:bound])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
return 0, ew
}
if nw != frameSize {
return written, io.ErrShortWrite
}
copy(buf, buf[frameSize+stdWriterPrefixLen:])
nr -= frameSize + stdWriterPrefixLen
}
}

View File

@@ -1,255 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the DOCKER-LICENSE file.
package docker
import (
"bytes"
"encoding/binary"
"errors"
"io"
"strings"
"testing"
"testing/iotest"
)
type errorWriter struct {
}
func (errorWriter) Write([]byte) (int, error) {
return 0, errors.New("something went wrong")
}
func TestStdCopy(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
input.Write([]byte("something happened!"))
input.Write([]byte{1, 0, 0, 0, 0, 0, 0, 12})
input.Write([]byte("just kidding"))
input.Write([]byte{0, 0, 0, 0, 0, 0, 0, 6})
input.Write([]byte("\nyeah!"))
n, err := stdCopy(&stdout, &stderr, &input)
if err != nil {
t.Fatal(err)
}
if expected := int64(19 + 12 + 6); n != expected {
t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
}
if got := stderr.String(); got != "something happened!" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got)
}
if got := stdout.String(); got != "just kidding\nyeah!" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "just kidding\nyeah!", got)
}
}
func TestStdCopyStress(t *testing.T) {
var input, stdout, stderr bytes.Buffer
value := strings.Repeat("something ", 4096)
writer := newStdWriter(&input, Stdout)
writer.Write([]byte(value))
n, err := stdCopy(&stdout, &stderr, &input)
if err != nil {
t.Fatal(err)
}
if n != 40960 {
t.Errorf("Wrong number of bytes. Want 40960. Got %d.", n)
}
if got := stderr.String(); got != "" {
t.Errorf("stdCopy: wrong stderr. Want empty string. Got %q", got)
}
if got := stdout.String(); got != value {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q", value, got)
}
}
func TestStdCopyInvalidStdHeader(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{3, 0, 0, 0, 0, 0, 0, 19})
n, err := stdCopy(&stdout, &stderr, &input)
if n != 0 {
t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d", n)
}
if err != errInvalidStdHeader {
t.Errorf("stdCopy: wrong error. Want ErrInvalidStdHeader. Got %#v", err)
}
}
func TestStdCopyBigFrame(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 18})
input.Write([]byte("something happened!"))
n, err := stdCopy(&stdout, &stderr, &input)
if err != nil {
t.Fatal(err)
}
if expected := int64(18); n != expected {
t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
}
if got := stderr.String(); got != "something happened" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened", got)
}
if got := stdout.String(); got != "" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
}
}
func TestStdCopySmallFrame(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 20})
input.Write([]byte("something happened!"))
n, err := stdCopy(&stdout, &stderr, &input)
if err != io.ErrShortWrite {
t.Errorf("stdCopy: wrong error. Want ShortWrite. Got %#v", err)
}
if expected := int64(19); n != expected {
t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
}
if got := stderr.String(); got != "something happened!" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened", got)
}
if got := stdout.String(); got != "" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
}
}
func TestStdCopyEmpty(t *testing.T) {
var input, stdout, stderr bytes.Buffer
n, err := stdCopy(&stdout, &stderr, &input)
if err != nil {
t.Fatal(err)
}
if n != 0 {
t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
}
}
func TestStdCopyCorruptedHeader(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0})
n, err := stdCopy(&stdout, &stderr, &input)
if err != nil {
t.Fatal(err)
}
if n != 0 {
t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
}
}
func TestStdCopyTruncateWriter(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
input.Write([]byte("something happened!"))
n, err := stdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input)
if err != nil {
t.Fatal(err)
}
if expected := int64(19); n != expected {
t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
}
if got := stderr.String(); got != "somethi" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "somethi", got)
}
if got := stdout.String(); got != "" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
}
}
func TestStdCopyHeaderOnly(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
n, err := stdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input)
if err != io.ErrShortWrite {
t.Errorf("stdCopy: wrong error. Want ShortWrite. Got %#v", err)
}
if n != 0 {
t.Errorf("Wrong number of bytes. Want 0. Got %d.", n)
}
if got := stderr.String(); got != "" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "", got)
}
if got := stdout.String(); got != "" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
}
}
func TestStdCopyDataErrReader(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
input.Write([]byte("something happened!"))
n, err := stdCopy(&stdout, &stderr, iotest.DataErrReader(&input))
if err != nil {
t.Fatal(err)
}
if expected := int64(19); n != expected {
t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
}
if got := stderr.String(); got != "something happened!" {
t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got)
}
if got := stdout.String(); got != "" {
t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
}
}
func TestStdCopyTimeoutReader(t *testing.T) {
var input, stdout, stderr bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
input.Write([]byte("something happened!"))
_, err := stdCopy(&stdout, &stderr, iotest.TimeoutReader(&input))
if err != iotest.ErrTimeout {
t.Errorf("stdCopy: wrong error. Want ErrTimeout. Got %#v.", err)
}
}
func TestStdCopyWriteError(t *testing.T) {
var input bytes.Buffer
input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
input.Write([]byte("something happened!"))
var stdout, stderr errorWriter
n, err := stdCopy(stdout, stderr, &input)
if err.Error() != "something went wrong" {
t.Errorf("stdCopy: wrong error. Want %q. Got %q", "something went wrong", err)
}
if n != 0 {
t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
}
}
type StdType [8]byte
var (
Stdin = StdType{0: 0}
Stdout = StdType{0: 1}
Stderr = StdType{0: 2}
)
type StdWriter struct {
io.Writer
prefix StdType
sizeBuf []byte
}
func (w *StdWriter) Write(buf []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
buf = append(w.prefix[:], buf...)
n, err = w.Writer.Write(buf)
return n - 8, err
}
func newStdWriter(w io.Writer, t StdType) *StdWriter {
if len(t) != 8 {
return nil
}
return &StdWriter{
Writer: w,
prefix: t,
sizeBuf: make([]byte, 4),
}
}

View File

@@ -13,21 +13,48 @@ import (
"path/filepath"
"strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils"
)
func createTarStream(srcPath string) (io.ReadCloser, error) {
func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
excludes, err := parseDockerignore(srcPath)
if err != nil {
return nil, err
}
includes := []string{"."}
// If .dockerignore mentions .dockerignore or the Dockerfile
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
// removed. The daemon will remove them for us, if needed, after it
// parses the Dockerfile.
//
// https://github.com/docker/docker/issues/8330
//
forceIncludeFiles := []string{".dockerignore", dockerfilePath}
for _, includeFile := range forceIncludeFiles {
if includeFile == "" {
continue
}
keepThem, err := fileutils.Matches(includeFile, excludes)
if err != nil {
return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
}
if keepThem {
includes = append(includes, includeFile)
}
}
if err := validateContextDirectory(srcPath, excludes); err != nil {
return nil, err
}
tarOpts := &archive.TarOptions{
ExcludePatterns: excludes,
IncludeFiles: includes,
Compression: archive.Uncompressed,
NoLchown: true,
}
@@ -84,16 +111,7 @@ func parseDockerignore(root string) ([]string, error) {
if err != nil && !os.IsNotExist(err) {
return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
}
for _, pattern := range strings.Split(string(ignore), "\n") {
matches, err := filepath.Match(pattern, "Dockerfile")
if err != nil {
return excludes, fmt.Errorf("bad .dockerignore pattern: '%s', error: %s", pattern, err)
}
if matches {
return excludes, fmt.Errorf("dockerfile was excluded by .dockerignore pattern '%s'", pattern)
}
excludes = append(excludes, pattern)
}
excludes = strings.Split(string(ignore), "\n")
return excludes, nil
}

View File

@@ -9,7 +9,7 @@ main() {
check_fmt() {
eval "set -e"
for file in $(git ls-files '*.go') ; do
for file in $(_list_go_files) ; do
gofmt $file | diff -u $file -
done
eval "set +e"
@@ -18,7 +18,7 @@ check_fmt() {
check_lint() {
_install_linter
for file in $(git ls-files '*.go') ; do
for file in $(_list_go_files) ; do
if [[ ! "$(${GOPATH}/bin/golint $file)" =~ ^[[:blank:]]*$ ]] ; then
_lint_verbose && exit 1
fi
@@ -26,7 +26,7 @@ check_lint() {
}
_lint_verbose() {
for file in $(git ls-files '*.go') ; do $GOPATH/bin/golint $file ; done
for file in $(_list_go_files) ; do $GOPATH/bin/golint $file ; done
}
_install_linter() {
@@ -35,4 +35,8 @@ _install_linter() {
fi
}
_list_go_files() {
git ls-files '*.go' | grep -v '^vendor/'
}
main "$@"

View File

@@ -22,7 +22,7 @@ import (
"time"
"github.com/fsouza/go-dockerclient"
"github.com/gorilla/mux"
"github.com/fsouza/go-dockerclient/vendor/github.com/gorilla/mux"
)
var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
@@ -533,8 +533,19 @@ func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
outStream := newStdWriter(w, stdout)
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
hijacker, ok := w.(http.Hijacker)
if !ok {
http.Error(w, "cannot hijack connection", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/vnd.docker.raw-stream")
w.WriteHeader(http.StatusOK)
conn, _, err := hijacker.Hijack()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
outStream := newStdWriter(conn, stdout)
if container.State.Running {
fmt.Fprintf(outStream, "Container %q is running\n", container.ID)
} else {
@@ -542,6 +553,7 @@ func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
}
fmt.Fprintln(outStream, "What happened?")
fmt.Fprintln(outStream, "Something happened")
conn.Close()
}
func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) {
@@ -566,12 +578,13 @@ func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) {
func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
force := r.URL.Query().Get("force")
_, index, err := s.findContainer(id)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
if s.containers[index].State.Running {
if s.containers[index].State.Running && force != "1" {
msg := "Error: API error (406): Impossible to remove a running container, please stop it first"
http.Error(w, msg, http.StatusInternalServerError)
return

View File

@@ -850,6 +850,23 @@ func TestRemoveContainerRunning(t *testing.T) {
}
}
func TestRemoveContainerRunningForce(t *testing.T) {
server := DockerServer{}
addContainers(&server, 1)
server.containers[0].State.Running = true
server.buildMuxer()
recorder := httptest.NewRecorder()
path := fmt.Sprintf("/containers/%s?%s", server.containers[0].ID, "force=1")
request, _ := http.NewRequest("DELETE", path, nil)
server.ServeHTTP(recorder, request)
if recorder.Code != http.StatusNoContent {
t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
}
if len(server.containers) > 0 {
t.Error("RemoveContainer: did not remove the container.")
}
}
func TestPullImage(t *testing.T) {
server := DockerServer{imgIDs: make(map[string]string)}
server.buildMuxer()

View File

@@ -0,0 +1,11 @@
# 0.8
logrus: defaults to stderr instead of stdout
# 0.7.3
formatter/\*: allow configuration of timestamp layout
# 0.7.2
formatter/text: Add configuration option for time format (#158)

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,349 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
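Writing your own hook only means implementing the two-method `Hook` interface
(`Levels() []Level` and `Fire(*Entry) error`). A minimal sketch (the
`countingHook` type is hypothetical, for illustration only):

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// countingHook counts how many entries of Error severity or above are logged.
type countingHook struct {
	count int
}

// Levels restricts the hook to error, fatal and panic entries.
func (h *countingHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire runs once per matching entry.
func (h *countingHook) Fire(entry *log.Entry) error {
	h.count++
	return nil
}

func main() {
	log.AddHook(&countingHook{})
	log.Error("something failed")
}
```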
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events, as the short example after this
list shows:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
any `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
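With the `JSONFormatter`, for instance, a call like the following produces a
line containing all three (the timestamp in the sample output is illustrative):

```go
log.WithFields(log.Fields{"animal": "walrus"}).Info("A walrus appears")
// Emits, give or take the timestamp:
// {"animal":"walrus","level":"info","msg":"A walrus appears","time":"2015-05-19T11:26:27-07:00"}
```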
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment, you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
```
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
View File
@@ -0,0 +1,252 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}
View File
@@ -0,0 +1,53 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
)
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}
View File
@@ -0,0 +1,188 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
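// StandardLogger returns the shared logger used by the package-level helper
// functions such as Info, Warnf and SetLevel.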
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}
View File
@@ -0,0 +1,48 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}
View File
@@ -0,0 +1,88 @@
package logrus
import (
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}
View File
@@ -0,0 +1,122 @@
package logrus
import (
"testing"
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
View File
@@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}
View File
@@ -0,0 +1,40 @@
package logrus
import (
"encoding/json"
"fmt"
)
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(f.TimestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
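// Usage sketch: the zero value is ready to use, and TimestampFormat accepts
// any Go time layout, for example:
//
//	logrus.SetFormatter(&logrus.JSONFormatter{TimestampFormat: time.RFC1123})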
View File
@@ -0,0 +1,120 @@
package logrus
import (
"encoding/json"
"errors"
"testing"
)
func TestErrorNotLost(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["error"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["omg"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestFieldClashWithTime(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("time", "right now!"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.time"] != "right now!" {
t.Fatal("fields.time not set to original time field")
}
if entry["time"] != "0001-01-01T00:00:00Z" {
t.Fatal("time field not set to current time, was: ", entry["time"])
}
}
func TestFieldClashWithMsg(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("msg", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.msg"] != "something" {
t.Fatal("fields.msg not set to original msg field")
}
}
func TestFieldClashWithLevel(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.level"] != "something" {
t.Fatal("fields.level not set to original level field")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
if b[len(b)-1] != '\n' {
t.Fatal("Expected JSON log entry to end with a newline")
}
}
View File
@@ -0,0 +1,203 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in development and when tracking down problems.
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(levelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(levelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
}
}
View File
@@ -0,0 +1,94 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
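// The intended pattern, sketched: a library accepts a StdLogger, so callers
// can hand it either a stdlib logger or a logrus one. A hypothetical
// constructor like
//
//	func NewServer(l StdLogger) { l.Println("starting") }
//
// works with log.New(os.Stderr, "", 0) as well as with logrus.New().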
View File
@@ -0,0 +1,301 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"sync"
"testing"
"github.com/fsouza/go-dockerclient/vendor/github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
func TestGetSetLevelRace(t *testing.T) {
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
if i%2 == 0 {
SetLevel(InfoLevel)
} else {
GetLevel()
}
}(i)
}
wg.Wait()
}
View File
@@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios
View File
@@ -0,0 +1,20 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}
View File
@@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios
View File
@@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}
View File
@@ -0,0 +1,7 @@
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios
View File
@@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
View File
@@ -0,0 +1,149 @@
package logrus
import (
"bytes"
"fmt"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var keys []string = make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
}
}
// needsQuoting reports whether the value contains characters outside the
// unquoted-safe set (letters, digits, '-' and '.') and therefore has to be
// quoted in the output.
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
switch value := value.(type) {
case string:
if needsQuoting(value) {
fmt.Fprintf(b, "%v=%q ", key, value)
} else {
fmt.Fprintf(b, "%v=%s ", key, value)
}
case error:
errmsg := value.Error()
if needsQuoting(errmsg) {
fmt.Fprintf(b, "%v=%q ", key, errmsg)
} else {
fmt.Fprintf(b, "%v=%s ", key, errmsg)
}
default:
fmt.Fprintf(b, "%v=%v ", key, value)
}
}
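// Configuration sketch: all fields have useful zero values, so a formatter
// is usually set up field by field, for example:
//
//	logrus.SetFormatter(&logrus.TextFormatter{
//		DisableColors: true,
//		FullTimestamp: true,
//	})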
View File
@@ -0,0 +1,61 @@
package logrus
import (
"bytes"
"errors"
"testing"
"time"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}
func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5 : timeEnd-1]
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
timeStr = timeStr[1 : len(timeStr)-1]
}
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.
View File
@@ -0,0 +1,31 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
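// Writer returns an *io.PipeWriter; every line written to it is logged
// through the logger at Info level. Callers should Close the writer when
// done (a finalizer closes it as a fallback).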
func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
View File
@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.
View File
@@ -0,0 +1,834 @@
package archive
import (
"archive/tar"
"bufio"
"bytes"
"compress/bzip2"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/promise"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)
type (
Archive io.ReadCloser
ArchiveReader io.Reader
Compression int
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
Name string
}
// Archiver allows the reuse of most utility functions of this package
// with a pluggable Untar function.
Archiver struct {
Untar func(io.Reader, string, *TarOptions) error
}
// breakoutError is used to differentiate errors related to breaking out.
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
breakoutError error
)
var (
ErrNotImplemented = errors.New("Function not implemented")
defaultArchiver = &Archiver{Untar}
)
const (
Uncompressed Compression = iota
Bzip2
Gzip
Xz
)
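// IsArchive reports whether the given header bytes look like a supported
// compressed stream or the start of a tar archive.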
func IsArchive(header []byte) bool {
compression := DetectCompression(header)
if compression != Uncompressed {
return true
}
r := tar.NewReader(bytes.NewBuffer(header))
_, err := r.Next()
return err == nil
}
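// DetectCompression inspects the magic bytes at the start of source and
// reports which supported compression format (bzip2, gzip or xz) they match,
// falling back to Uncompressed. A call sketch, assuming some io.Reader r
// positioned at the start of the stream:
//
//	header := make([]byte, 10)
//	io.ReadFull(r, header)
//	if DetectCompression(header) == Gzip { /* ... */ }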
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
logrus.Debugf("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
return compression
}
}
return Uncompressed
}
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
args := []string{"xz", "-d", "-c", "-q"}
return CmdStream(exec.Command(args[0], args[1:]...), archive)
}
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
if err != nil {
return nil, err
}
compression := DetectCompression(bs)
switch compression {
case Uncompressed:
readBufWrapper := p.NewReadCloserWrapper(buf, buf)
return readBufWrapper, nil
case Gzip:
gzReader, err := gzip.NewReader(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
return readBufWrapper, nil
case Bzip2:
bz2Reader := bzip2.NewReader(buf)
readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
return readBufWrapper, nil
case Xz:
xzReader, err := xzDecompress(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return readBufWrapper, nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
case Uncompressed:
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
return writeBufWrapper, nil
case Gzip:
gzWriter := gzip.NewWriter(dest)
writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
return writeBufWrapper, nil
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all.
// However, this is not a problem as docker only currently generates gzipped tars.
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
}
return ""
}
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
// for hardlink mapping
SeenFiles map[uint64]string
}
// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
return "", err
}
// suffix with '/' for directories
if isDir && !strings.HasSuffix(name, "/") {
name += "/"
}
return name, nil
}
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
return err
}
link := ""
if fi.Mode()&os.ModeSymlink != 0 {
if link, err = os.Readlink(path); err != nil {
return err
}
}
hdr, err := tar.FileInfoHeader(fi, link)
if err != nil {
return err
}
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
return fmt.Errorf("tar: cannot canonicalize path: %v", err)
}
hdr.Name = name
nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
if err != nil {
return err
}
// if it's a regular file and has more than 1 link,
// it's hardlinked, so set the type flag accordingly
if fi.Mode().IsRegular() && nlink > 1 {
// a link should have a name that it links to,
// and that linked name should be first in the tar archive
if oldpath, ok := ta.SeenFiles[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = oldpath
hdr.Size = 0 // This Must be here for the writer math to add up!
} else {
ta.SeenFiles[inode] = name
}
}
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg {
file, err := os.Open(path)
if err != nil {
return err
}
ta.Buffer.Reset(ta.TarWriter)
defer ta.Buffer.Reset(nil)
_, err = io.Copy(ta.Buffer, file)
file.Close()
if err != nil {
return err
}
err = ta.Buffer.Flush()
if err != nil {
return err
}
}
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
}
case tar.TypeReg, tar.TypeRegA:
// Source is regular file
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
if err != nil {
return err
}
if _, err := io.Copy(file, reader); err != nil {
file.Close()
return err
}
file.Close()
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}
case tar.TypeLink:
targetPath := filepath.Join(extractDir, hdr.Linkname)
// check for hardlink breakout
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
}
if err := os.Link(targetPath, path); err != nil {
return err
}
case tar.TypeSymlink:
// path -> hdr.Linkname = targetPath
// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
// that symlink would first have to be created, which would be caught earlier, at this very check:
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
}
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}
case tar.TypeXGlobalHeader:
logrus.Debugf("PAX Global Extended Headers found and ignored")
return nil
default:
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
}
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
return err
}
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
return err
}
}
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
// syscall.UtimesNano doesn't support a NOFOLLOW flag yet, so symlinks
// are handled with system.LUtimesNano in the final branch below
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
} else {
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
}
return nil
}
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{Compression: compression})
}
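// Editor's note: a minimal usage sketch, not part of the upstream file; the
// source directory and output path below are hypothetical. It shows that Tar
// returns a streaming reader which can be drained without buffering the
// whole archive in memory.
func exampleTarDirectory() error {
	rc, err := Tar("/var/lib/app", Gzip)
	if err != nil {
		return err
	}
	defer rc.Close()
	out, err := os.Create("/tmp/app.tar.gz")
	if err != nil {
		return err
	}
	defer out.Close()
	// Tar streams through a pipe, so this copy runs as the archive is built.
	_, err = io.Copy(out, rc)
	return err
}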
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
if err != nil {
return nil, err
}
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
if options.IncludeFiles == nil {
options.IncludeFiles = []string{"."}
}
seen := make(map[string]bool)
var renamedRelFilePath string // For when tar.Options.Name is set
for _, include := range options.IncludeFiles {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil || (relFilePath == "." && f.IsDir()) {
// Error getting relative path OR we are looking
// at the root path. Skip in both situations.
return nil
}
skip := false
// If "include" is an exact match for the current file
// then even if there's an "excludePatterns" pattern that
// matches it, don't skip it. IOW, assume an explicit 'include'
// is asking for that file no matter what - which is true
// for some files, like .dockerignore and Dockerfile (sometimes)
if include != relFilePath {
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
if err != nil {
logrus.Debugf("Error matching %s", relFilePath, err)
return err
}
}
if skip {
if !exceptions && f.IsDir() {
return filepath.SkipDir
}
return nil
}
if seen[relFilePath] {
return nil
}
seen[relFilePath] = true
// Rename the base resource
if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
renamedRelFilePath = relFilePath
}
// Set this to make sure the items underneath also get renamed
if options.Name != "" {
relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
}
return nil
})
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
}
}()
return pipeReader, nil
}
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
// Iterate through the files in the archive.
loop:
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return err
}
// Normalize name, for safety and for a simple is-root check
// This keeps "../" as-is, but normalizes "/../" to "/"
hdr.Name = filepath.Clean(hdr.Name)
for _, exclude := range options.ExcludePatterns {
if strings.HasPrefix(hdr.Name, exclude) {
continue loop
}
}
if !strings.HasSuffix(hdr.Name, "/") {
// Not the root directory, ensure that the parent directory exists
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0777)
if err != nil {
return err
}
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return err
}
if strings.HasPrefix(rel, "../") {
return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
// If path exists we almost always just want to remove and replace it
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if fi.IsDir() && hdr.Name == "." {
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}
trBuf.Reset(tr)
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
return err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
return err
}
}
return nil
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
if archive == nil {
return fmt.Errorf("Empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
return err
}
defer decompressedArchive.Close()
return Unpack(decompressedArchive, dest, options)
}
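// Editor's note: a hedged sketch of Untar on a file-backed archive; the
// paths and the exclude pattern are made up for illustration.
func exampleUntarFile() error {
	f, err := os.Open("/tmp/app.tar.gz")
	if err != nil {
		return err
	}
	defer f.Close()
	// Untar decompresses internally via DecompressStream, so gzip/bzip2/xz
	// inputs need no special handling here.
	return Untar(f, "/tmp/extracted", &TarOptions{
		ExcludePatterns: []string{"tmp"},
	})
}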
func (archiver *Archiver) TarUntar(src, dst string) error {
logrus.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err
}
defer archive.Close()
return archiver.Untar(archive, dst, nil)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
return defaultArchiver.TarUntar(src, dst)
}
func (archiver *Archiver) UntarPath(src, dst string) error {
archive, err := os.Open(src)
if err != nil {
return err
}
defer archive.Close()
if err := archiver.Untar(archive, dst, nil); err != nil {
return err
}
return nil
}
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
return defaultArchiver.UntarPath(src, dst)
}
func (archiver *Archiver) CopyWithTar(src, dst string) error {
srcSt, err := os.Stat(src)
if err != nil {
return err
}
if !srcSt.IsDir() {
return archiver.CopyFileWithTar(src, dst)
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
return archiver.TarUntar(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
return defaultArchiver.CopyWithTar(src, dst)
}
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
if err != nil {
return err
}
if srcSt.IsDir() {
return fmt.Errorf("Can't copy a directory")
}
// Clean up the trailing /
if dst[len(dst)-1] == '/' {
dst = path.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
return err
}
r, w := io.Pipe()
errC := promise.Go(func() error {
defer w.Close()
srcF, err := os.Open(src)
if err != nil {
return err
}
defer srcF.Close()
hdr, err := tar.FileInfoHeader(srcSt, "")
if err != nil {
return err
}
hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
tw := tar.NewWriter(w)
defer tw.Close()
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := io.Copy(tw, srcF); err != nil {
return err
}
return nil
})
defer func() {
if er := <-errC; er != nil {
err = er
}
}()
return archiver.Untar(r, filepath.Dir(dst), nil)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
return defaultArchiver.CopyFileWithTar(src, dst)
}
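// Editor's note: illustrative only; the paths are hypothetical. With the
// trailing slash, the base name of src is appended to dst, so both calls
// below place the copy at /tmp/backup/app.conf.
func exampleCopyFile() error {
	if err := CopyFileWithTar("/etc/app.conf", "/tmp/backup/"); err != nil {
		return err
	}
	return CopyFileWithTar("/etc/app.conf", "/tmp/backup/app.conf")
}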
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
if input != nil {
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
// Write stdin if any
go func() {
io.Copy(stdin, input)
stdin.Close()
}()
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
pipeR, pipeW := io.Pipe()
errChan := make(chan []byte)
// Collect stderr, we will use it in case of an error
go func() {
errText, e := ioutil.ReadAll(stderr)
if e != nil {
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
}
errChan <- errText
}()
// Copy stdout to the returned pipe
go func() {
_, err := io.Copy(pipeW, stdout)
if err != nil {
pipeW.CloseWithError(err)
}
errText := <-errChan
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
} else {
pipeW.Close()
}
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
return pipeR, nil
}
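// Editor's note: a sketch of CmdStream usage, assuming an `xz` binary is on
// PATH. If the command exits non-zero, the returned pipe surfaces its stderr
// output as the read error.
func exampleCmdStream(compressed io.Reader) (io.ReadCloser, error) {
	return CmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
}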
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{File: f, Size: size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
read int64
closed bool
}
// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
if archive.closed {
return nil
}
archive.closed = true
return archive.File.Close()
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
archive.read += int64(n)
if err != nil || archive.read == archive.Size {
archive.Close()
os.Remove(archive.File.Name())
}
return n, err
}

View File

@@ -0,0 +1,53 @@
// +build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"syscall"
)
// CanonicalTarNameForPath converts the platform-specific relative path p
// into the canonical posix-style path used for tar archival.
func CanonicalTarNameForPath(p string) (string, error) {
return p, nil // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
}
nlink = uint32(s.Nlink)
inode = uint64(s.Ino)
// Currently go does not fill in the major/minor device numbers
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
}
return
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}

View File

@@ -0,0 +1,60 @@
// +build !windows
package archive
import (
"os"
"testing"
)
func TestCanonicalTarNameForPath(t *testing.T) {
cases := []struct{ in, expected string }{
{"foo", "foo"},
{"foo/bar", "foo/bar"},
{"foo/dir/", "foo/dir/"},
}
for _, v := range cases {
if out, err := CanonicalTarNameForPath(v.in); err != nil {
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
} else if out != v.expected {
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
}
}
}
func TestCanonicalTarName(t *testing.T) {
cases := []struct {
in string
isDir bool
expected string
}{
{"foo", false, "foo"},
{"foo", true, "foo/"},
{"foo/bar", false, "foo/bar"},
{"foo/bar", true, "foo/bar/"},
}
for _, v := range cases {
if out, err := canonicalTarName(v.in, v.isDir); err != nil {
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
} else if out != v.expected {
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
}
}
}
func TestChmodTarEntry(t *testing.T) {
cases := []struct {
in, expected os.FileMode
}{
{0000, 0000},
{0777, 0777},
{0644, 0644},
{0755, 0755},
{0444, 0444},
}
for _, v := range cases {
if out := chmodTarEntry(v.in); out != v.expected {
t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
}
}
}

View File

@@ -0,0 +1,40 @@
// +build windows
package archive
import (
"archive/tar"
"fmt"
"os"
"strings"
)
// CanonicalTarNameForPath converts the platform-specific relative path p
// into the canonical posix-style path used for tar archival.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows-style relative paths with backslashes into
// forward slashes. Since windows does not allow '/' or '\' in file
// names, the replacement is mostly safe; however, we must still check
// for stray forward slashes just in case
if strings.Contains(p, "/") {
return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}

View File

@@ -0,0 +1,65 @@
// +build windows
package archive
import (
"os"
"testing"
)
func TestCanonicalTarNameForPath(t *testing.T) {
cases := []struct {
in, expected string
shouldFail bool
}{
{"foo", "foo", false},
{"foo/bar", "___", true}, // unix-styled windows path must fail
{`foo\bar`, "foo/bar", false},
}
for _, v := range cases {
if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
} else if v.shouldFail && err == nil {
t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
} else if !v.shouldFail && out != v.expected {
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
}
}
}
func TestCanonicalTarName(t *testing.T) {
cases := []struct {
in string
isDir bool
expected string
}{
{"foo", false, "foo"},
{"foo", true, "foo/"},
{`foo\bar`, false, "foo/bar"},
{`foo\bar`, true, "foo/bar/"},
}
for _, v := range cases {
if out, err := canonicalTarName(v.in, v.isDir); err != nil {
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
} else if out != v.expected {
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
}
}
}
func TestChmodTarEntry(t *testing.T) {
cases := []struct {
in, expected os.FileMode
}{
{0000, 0111},
{0777, 0755},
{0644, 0755},
{0755, 0755},
{0444, 0555},
}
for _, v := range cases {
if out := chmodTarEntry(v.in); out != v.expected {
t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
}
}
}

View File

@@ -0,0 +1,422 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)
type ChangeType int
const (
ChangeModify = iota
ChangeAdd
ChangeDelete
)
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
var kind string
switch change.Kind {
case ChangeModify:
kind = "C"
case ChangeAdd:
kind = "A"
case ChangeDelete:
kind = "D"
}
return fmt.Sprintf("%s %s", kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// GNU tar and the Go tar writer don't have sub-second mtime precision,
// which is problematic when we apply changes via tar files. We handle
// this by comparing for exact times, *or* the same second count with
// either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
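// Editor's note: e.g. 12:00:00.500000000 and 12:00:00.000000000 compare as
// equal here, since a tar round-trip would have truncated the nanoseconds.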
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
var changes []Change
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
path = filepath.Join("/", path)
// Skip root
if path == "/" {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
originalFile := file[len(".wh."):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
type FileInfo struct {
parent *FileInfo
name string
stat *system.Stat_t
children map[string]*FileInfo
capability []byte
added bool
}
func (root *FileInfo) LookUp(path string) *FileInfo {
parent := root
if path == "/" {
return root
}
pathElements := strings.Split(path, "/")
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
return "/"
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if oldStat.Mode() != newStat.Mode() ||
oldStat.Uid() != newStat.Uid() ||
oldStat.Gid() != newStat.Gid() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
root := &FileInfo{
name: "/",
children: make(map[string]*FileInfo),
}
return root
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
relPath = filepath.Join("/", relPath)
if relPath == "/" {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
if oldDir != "" {
oldRoot, err1 = collectFileInfo(oldDir)
}
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, err
}
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var size int64
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, _ := os.Lstat(file)
if fileInfo != nil && !fileInfo.IsDir() {
size += fileInfo.Size()
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change) (Archive, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}
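// Editor's note: a hedged sketch chaining ChangesDirs and ExportChanges to
// build a layer-style tar from two directory snapshots; the directory names
// are hypothetical.
func exampleDiffLayer(beforeDir, afterDir string) (Archive, error) {
	changes, err := ChangesDirs(afterDir, beforeDir)
	if err != nil {
		return nil, err
	}
	// Deletions become .wh. whiteout entries in the resulting stream.
	return ExportChanges(afterDir, changes)
}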

View File

@@ -0,0 +1,127 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"sort"
"testing"
)
func TestHardLinkOrder(t *testing.T) {
names := []string{"file1.txt", "file2.txt", "file3.txt"}
msg := []byte("Hey y'all")
// Create dir
src, err := ioutil.TempDir("", "docker-hardlink-test-src-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
for _, name := range names {
func() {
fh, err := os.Create(path.Join(src, name))
if err != nil {
t.Fatal(err)
}
defer fh.Close()
if _, err = fh.Write(msg); err != nil {
t.Fatal(err)
}
}()
}
// Create dest, with changes that includes hardlinks
dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-")
if err != nil {
t.Fatal(err)
}
os.RemoveAll(dest) // we just want the name, at first
if err := copyDir(src, dest); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dest)
for _, name := range names {
for i := 0; i < 5; i++ {
if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil {
t.Fatal(err)
}
}
}
// get changes
changes, err := ChangesDirs(dest, src)
if err != nil {
t.Fatal(err)
}
// sort
sort.Sort(changesByPath(changes))
// ExportChanges
ar, err := ExportChanges(dest, changes)
if err != nil {
t.Fatal(err)
}
hdrs, err := walkHeaders(ar)
if err != nil {
t.Fatal(err)
}
// reverse sort
sort.Sort(sort.Reverse(changesByPath(changes)))
// ExportChanges
arRev, err := ExportChanges(dest, changes)
if err != nil {
t.Fatal(err)
}
hdrsRev, err := walkHeaders(arRev)
if err != nil {
t.Fatal(err)
}
// line up the two sets
sort.Sort(tarHeaders(hdrs))
sort.Sort(tarHeaders(hdrsRev))
// compare Size and LinkName
for i := range hdrs {
if hdrs[i].Name != hdrsRev[i].Name {
t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name)
}
if hdrs[i].Size != hdrsRev[i].Size {
t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size)
}
if hdrs[i].Typeflag != hdrsRev[i].Typeflag {
t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag)
}
if hdrs[i].Linkname != hdrsRev[i].Linkname {
t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname)
}
}
}
type tarHeaders []tar.Header
func (th tarHeaders) Len() int { return len(th) }
func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] }
func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
func walkHeaders(r io.Reader) ([]tar.Header, error) {
t := tar.NewReader(r)
headers := []tar.Header{}
for {
hdr, err := t.Next()
if err != nil {
if err == io.EOF {
break
}
return headers, err
}
headers = append(headers, *hdr)
}
return headers, nil
}

View File

@@ -0,0 +1,445 @@
package archive
import (
"io/ioutil"
"os"
"os/exec"
"path"
"sort"
"syscall"
"testing"
"time"
)
func max(x, y int) int {
if x >= y {
return x
}
return y
}
func copyDir(src, dst string) error {
cmd := exec.Command("cp", "-a", src, dst)
if err := cmd.Run(); err != nil {
return err
}
return nil
}
type FileType uint32
const (
Regular FileType = iota
Dir
Symlink
)
type FileData struct {
filetype FileType
path string
contents string
permissions os.FileMode
}
func createSampleDir(t *testing.T, root string) {
files := []FileData{
{Regular, "file1", "file1\n", 0600},
{Regular, "file2", "file2\n", 0666},
{Regular, "file3", "file3\n", 0404},
{Regular, "file4", "file4\n", 0600},
{Regular, "file5", "file5\n", 0600},
{Regular, "file6", "file6\n", 0600},
{Regular, "file7", "file7\n", 0600},
{Dir, "dir1", "", 0740},
{Regular, "dir1/file1-1", "file1-1\n", 01444},
{Regular, "dir1/file1-2", "file1-2\n", 0666},
{Dir, "dir2", "", 0700},
{Regular, "dir2/file2-1", "file2-1\n", 0666},
{Regular, "dir2/file2-2", "file2-2\n", 0666},
{Dir, "dir3", "", 0700},
{Regular, "dir3/file3-1", "file3-1\n", 0666},
{Regular, "dir3/file3-2", "file3-2\n", 0666},
{Dir, "dir4", "", 0700},
{Regular, "dir4/file3-1", "file4-1\n", 0666},
{Regular, "dir4/file3-2", "file4-2\n", 0666},
{Symlink, "symlink1", "target1", 0666},
{Symlink, "symlink2", "target2", 0666},
}
now := time.Now()
for _, info := range files {
p := path.Join(root, info.path)
if info.filetype == Dir {
if err := os.MkdirAll(p, info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Regular {
if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Symlink {
if err := os.Symlink(info.contents, p); err != nil {
t.Fatal(err)
}
}
if info.filetype != Symlink {
// Set a consistent ctime, atime for all files and dirs
if err := os.Chtimes(p, now, now); err != nil {
t.Fatal(err)
}
}
}
}
func TestChangeString(t *testing.T) {
modifyChange := Change{"change", ChangeModify}
toString := modifyChange.String()
if toString != "C change" {
t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString)
}
addChange := Change{"change", ChangeAdd}
toString = addChange.String()
if toString != "A change" {
t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
}
deleteChange := Change{"change", ChangeDelete}
toString = deleteChange.String()
if toString != "D change" {
t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
}
}
func TestChangesWithNoChanges(t *testing.T) {
rwLayer, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rwLayer)
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(layer)
createSampleDir(t, layer)
changes, err := Changes([]string{layer}, rwLayer)
if err != nil {
t.Fatal(err)
}
if len(changes) != 0 {
t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes))
}
}
func TestChangesWithChanges(t *testing.T) {
rwLayer, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rwLayer)
// Create a folder
dir1 := path.Join(rwLayer, "dir1")
os.MkdirAll(dir1, 0740)
deletedFile := path.Join(dir1, ".wh.file1-2")
ioutil.WriteFile(deletedFile, []byte{}, 0600)
modifiedFile := path.Join(dir1, "file1-1")
ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
// Let's add a subfolder for a newFile
subfolder := path.Join(dir1, "subfolder")
os.MkdirAll(subfolder, 0740)
newFile := path.Join(subfolder, "newFile")
ioutil.WriteFile(newFile, []byte{}, 0740)
// Let's create folders that will have the role of layers with the same data
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(layer)
createSampleDir(t, layer)
os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
// Let's modify the modtime for dir1 to be sure it's the same for the two layers (to avoid false positives)
fi, err := os.Stat(dir1)
if err != nil {
t.Fatal(err)
}
mtime := fi.ModTime()
stat := fi.Sys().(*syscall.Stat_t)
atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
layerDir1 := path.Join(layer, "dir1")
os.Chtimes(layerDir1, atime, mtime)
changes, err := Changes([]string{layer}, rwLayer)
if err != nil {
t.Fatal(err)
}
sort.Sort(changesByPath(changes))
expectedChanges := []Change{
{"/dir1/file1-1", ChangeModify},
{"/dir1/file1-2", ChangeDelete},
{"/dir1/subfolder", ChangeModify},
{"/dir1/subfolder/newFile", ChangeAdd},
}
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
if i >= len(expectedChanges) {
t.Fatalf("unexpected change %s\n", changes[i].String())
}
if i >= len(changes) {
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
}
if changes[i].Path == expectedChanges[i].Path {
if changes[i] != expectedChanges[i] {
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
}
} else if changes[i].Path < expectedChanges[i].Path {
t.Fatalf("unexpected change %s\n", changes[i].String())
} else {
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
}
}
}
// Create a directory, copy it, and make sure we report no changes between the two
func TestChangesDirsEmpty(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
createSampleDir(t, src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
if len(changes) != 0 {
t.Fatalf("Reported changes for identical dirs: %v", changes)
}
os.RemoveAll(src)
os.RemoveAll(dst)
}
func mutateSampleDir(t *testing.T, root string) {
// Remove a regular file
if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
t.Fatal(err)
}
// Remove a directory
if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
t.Fatal(err)
}
// Remove a symlink
if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
t.Fatal(err)
}
// Rewrite a file
if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
t.Fatal(err)
}
// Replace a file
if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
t.Fatal(err)
}
// Touch file
if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
// Replace file with dir
if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
t.Fatal(err)
}
// Create new file
if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
t.Fatal(err)
}
// Create new dir
if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
t.Fatal(err)
}
// Create a new symlink
if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
t.Fatal(err)
}
// Change a symlink
if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
t.Fatal(err)
}
if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
t.Fatal(err)
}
// Replace dir with file
if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
t.Fatal(err)
}
// Touch dir
if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
}
func TestChangesDirsMutated(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
defer os.RemoveAll(dst)
mutateSampleDir(t, dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
sort.Sort(changesByPath(changes))
expectedChanges := []Change{
{"/dir1", ChangeDelete},
{"/dir2", ChangeModify},
{"/dirnew", ChangeAdd},
{"/file1", ChangeDelete},
{"/file2", ChangeModify},
{"/file3", ChangeModify},
{"/file4", ChangeModify},
{"/file5", ChangeModify},
{"/filenew", ChangeAdd},
{"/symlink1", ChangeDelete},
{"/symlink2", ChangeModify},
{"/symlinknew", ChangeAdd},
}
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
if i >= len(expectedChanges) {
t.Fatalf("unexpected change %s\n", changes[i].String())
}
if i >= len(changes) {
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
}
if changes[i].Path == expectedChanges[i].Path {
if changes[i] != expectedChanges[i] {
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
}
} else if changes[i].Path < expectedChanges[i].Path {
t.Fatalf("unexpected change %s\n", changes[i].String())
} else {
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
}
}
}
func TestApplyLayer(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
defer os.RemoveAll(src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
mutateSampleDir(t, dst)
defer os.RemoveAll(dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
layer, err := ExportChanges(dst, changes)
if err != nil {
t.Fatal(err)
}
layerCopy, err := NewTempArchive(layer, "")
if err != nil {
t.Fatal(err)
}
if _, err := ApplyLayer(src, layerCopy); err != nil {
t.Fatal(err)
}
changes2, err := ChangesDirs(src, dst)
if err != nil {
t.Fatal(err)
}
if len(changes2) != 0 {
t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
}
}
func TestChangesSizeWithNoChanges(t *testing.T) {
size := ChangesSize("/tmp", nil)
if size != 0 {
t.Fatalf("ChangesSizes with no changes should be 0, was %d", size)
}
}
func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
changes := []Change{
{Path: "deletedPath", Kind: ChangeDelete},
}
size := ChangesSize("/tmp", changes)
if size != 0 {
t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
}
}
func TestChangesSize(t *testing.T) {
parentPath, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(parentPath)
addition := path.Join(parentPath, "addition")
if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
t.Fatal(err)
}
modification := path.Join(parentPath, "modification")
if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
t.Fatal(err)
}
changes := []Change{
{Path: "addition", Kind: ChangeAdd},
{Path: "modification", Kind: ChangeModify},
}
size := ChangesSize(parentPath, changes)
if size != 6 {
t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
}
}

View File

@@ -0,0 +1,168 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)
func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
if !strings.HasSuffix(hdr.Name, "/") {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
return 0, err
}
}
continue
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
if strings.HasPrefix(rel, "../") {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, ".wh.") {
originalBase := base[len(".wh."):]
originalPath := filepath.Join(filepath.Dir(path), originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
layer, err = DecompressStream(layer)
if err != nil {
return 0, err
}
return UnpackLayer(dest, layer)
}
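// Editor's note: a minimal round-trip sketch under the assumption that
// `base` and `modified` are snapshots of the same tree: export the diff,
// then re-apply it onto another copy of `base` at `target`.
func exampleApplyDiff(base, modified, target string) (int64, error) {
	changes, err := ChangesDirs(modified, base)
	if err != nil {
		return 0, err
	}
	layer, err := ExportChanges(modified, changes)
	if err != nil {
		return 0, err
	}
	defer layer.Close()
	return ApplyLayer(target, layer)
}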

View File

@@ -0,0 +1,190 @@
package archive
import (
"archive/tar"
"testing"
)
func TestApplyLayerInvalidFilenames(t *testing.T) {
for i, headers := range [][]*tar.Header{
{
{
Name: "../victim/dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{
{
// Note the leading slash
Name: "/../victim/slash-dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestApplyLayerInvalidHardlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeLink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeLink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (hardlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try reading victim/hello (hardlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try removing victim directory (hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestApplyLayerInvalidSymlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeSymlink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeSymlink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try removing victim directory (symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}

View File

@@ -0,0 +1,97 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View File

@@ -0,0 +1,16 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}

View File

@@ -0,0 +1,16 @@
// +build !linux
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View File

@@ -0,0 +1,166 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
)
var testUntarFns = map[string]func(string, io.Reader) error{
"untar": func(dest string, r io.Reader) error {
return Untar(r, dest, nil)
},
"applylayer": func(dest string, r io.Reader) error {
_, err := ApplyLayer(dest, ArchiveReader(r))
return err
},
}
// testBreakout is a helper function that, within the provided `tmpdir` directory,
// creates a `victim` folder with a generated `hello` file in it.
// `untar` extracts the tar file created from `headers` into a directory named `dest`.
//
// Here are the tested scenarios:
// - removed `victim` folder (write)
// - removed files from `victim` folder (write)
// - new files in `victim` folder (write)
// - modified files in `victim` folder (write)
// - file in `dest` with same content as `victim/hello` (read)
//
// When using testBreakout make sure you cover one of the scenarios listed above.
func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
tmpdir, err := ioutil.TempDir("", tmpdir)
if err != nil {
return err
}
defer os.RemoveAll(tmpdir)
dest := filepath.Join(tmpdir, "dest")
if err := os.Mkdir(dest, 0755); err != nil {
return err
}
victim := filepath.Join(tmpdir, "victim")
if err := os.Mkdir(victim, 0755); err != nil {
return err
}
hello := filepath.Join(victim, "hello")
helloData, err := time.Now().MarshalText()
if err != nil {
return err
}
if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
return err
}
helloStat, err := os.Stat(hello)
if err != nil {
return err
}
reader, writer := io.Pipe()
go func() {
t := tar.NewWriter(writer)
for _, hdr := range headers {
t.WriteHeader(hdr)
}
t.Close()
}()
untar := testUntarFns[untarFn]
if untar == nil {
return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
}
if err := untar(dest, reader); err != nil {
if _, ok := err.(breakoutError); !ok {
// If untar returns an error unrelated to an archive breakout,
// then consider this an unexpected error and abort.
return err
}
// Here, untar detected the breakout.
// Let's move on verifying that indeed there was no breakout.
fmt.Printf("breakoutError: %v\n", err)
}
// Check victim folder
f, err := os.Open(victim)
if err != nil {
// codepath taken if victim folder was removed
return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
}
defer f.Close()
// Check contents of victim folder
//
// We are only interested in getting 2 files from the victim folder, because if all is well
// we expect only one result, the `hello` file. If there is a second result, it cannot
// hold the same name `hello` and we assume that a new file got created in the victim folder.
// That is enough to detect an archive breakout.
names, err := f.Readdirnames(2)
if err != nil {
// codepath taken if victim is not a folder
return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
}
for _, name := range names {
if name != "hello" {
// codepath taken if new file was created in victim folder
return fmt.Errorf("archive breakout: new file %q", name)
}
}
// Check victim/hello
f, err = os.Open(hello)
if err != nil {
// codepath taken if read permissions were removed
return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fi, err := f.Stat()
if err != nil {
return err
}
if helloStat.IsDir() != fi.IsDir() ||
// TODO: cannot check for fi.ModTime() change
helloStat.Mode() != fi.Mode() ||
helloStat.Size() != fi.Size() ||
!bytes.Equal(helloData, b) {
// codepath taken if hello has been modified
return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
}
// Check that nothing in dest/ has the same content as victim/hello.
// Since victim/hello was generated with time.Now(), it is safe to assume
// that any file whose content exactly matches victim/hello somehow managed
// to access victim/hello.
return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
if err != nil {
// skip directory if error
return filepath.SkipDir
}
// enter directory
return nil
}
if err != nil {
// skip file if error
return nil
}
b, err := ioutil.ReadFile(path)
if err != nil {
// Houston, we have a problem. Aborting (space)walk.
return err
}
if bytes.Equal(helloData, b) {
return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
}
return nil
})
}
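
To make the helper's contract concrete, here is a minimal sketch (illustrative only) of how a test in this package might feed testBreakout a path-traversal header; the header values and test name are made up.

package archive

import (
	"archive/tar"
	"testing"
)

func TestUntarBreakoutSketch(t *testing.T) {
	// A header whose name climbs out of `dest` toward the sibling
	// `victim` folder; extraction must either reject it or leave
	// `victim` untouched, and testBreakout verifies exactly that.
	headers := []*tar.Header{{
		Name:     "../victim/newfile",
		Typeflag: tar.TypeReg,
		Mode:     0644,
	}}
	if err := testBreakout("untar", "docker-TestUntarBreakoutSketch", headers); err != nil {
		t.Fatal(err)
	}
}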

View File

@@ -0,0 +1,59 @@
package archive
import (
"archive/tar"
"bytes"
"io/ioutil"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, that file is created with empty
// content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return ioutil.NopCloser(buf), nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
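
A caller's-eye sketch of Generate (illustrative; the vendored import path is assumed from the other files in this change):

package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive"
)

func main() {
	// One complete pair plus a trailing name: foo.txt gets content,
	// emptyfile is created empty.
	a, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(a)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name) // prints foo.txt, then emptyfile
	}
}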

View File

@@ -0,0 +1,98 @@
package archive
import (
"archive/tar"
"bytes"
"io"
"testing"
)
func TestGenerateEmptyFile(t *testing.T) {
archive, err := Generate("emptyFile")
if err != nil {
t.Fatal(err)
}
if archive == nil {
t.Fatal("The generated archive should not be nil.")
}
expectedFiles := [][]string{
{"emptyFile", ""},
}
tr := tar.NewReader(archive)
actualFiles := make([][]string, 0, 10)
i := 0
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
buf := new(bytes.Buffer)
buf.ReadFrom(tr)
content := buf.String()
actualFiles = append(actualFiles, []string{hdr.Name, content})
i++
}
if len(actualFiles) != len(expectedFiles) {
t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
}
for i := 0; i < len(expectedFiles); i++ {
actual := actualFiles[i]
expected := expectedFiles[i]
if actual[0] != expected[0] {
t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
}
if actual[1] != expected[1] {
t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
}
}
}
func TestGenerateWithContent(t *testing.T) {
archive, err := Generate("file", "content")
if err != nil {
t.Fatal(err)
}
if archive == nil {
t.Fatal("The generated archive should not be nil.")
}
expectedFiles := [][]string{
{"file", "content"},
}
tr := tar.NewReader(archive)
actualFiles := make([][]string, 0, 10)
i := 0
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
buf := new(bytes.Buffer)
buf.ReadFrom(tr)
content := buf.String()
actualFiles = append(actualFiles, []string{hdr.Name, content})
i++
}
if len(actualFiles) != len(expectedFiles) {
t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
}
for i := 0; i < len(expectedFiles); i++ {
actual := actualFiles[i]
expected := expectedFiles[i]
if actual[0] != expected[0] {
t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
}
if actual[1] != expected[1] {
t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
}
}
}

View File

@@ -0,0 +1,170 @@
package fileutils
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
)
func Exclusion(pattern string) bool {
return pattern[0] == '!'
}
func Empty(pattern string) bool {
return pattern == ""
}
// CleanPatterns takes a slice of patterns and returns a new slice of
// patterns cleaned with filepath.Clean and stripped of any empty patterns.
// It also lets the caller know whether the slice contains any exception
// patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
// 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
for _, pattern := range patterns {
// Eliminate leading and trailing whitespace.
pattern = strings.TrimSpace(pattern)
if Empty(pattern) {
continue
}
if Exclusion(pattern) {
if len(pattern) == 1 {
logrus.Errorf("Illegal exclusion pattern: %s", pattern)
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
}
exceptions = true
}
pattern = filepath.Clean(pattern)
cleanedPatterns = append(cleanedPatterns, pattern)
if Exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
}
return cleanedPatterns, patternDirs, exceptions, nil
}
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
patterns, patDirs, _, err := CleanPatterns(patterns)
if err != nil {
return false, err
}
return OptimizedMatches(file, patterns, patDirs)
}
// OptimizedMatches is basically the same as Matches() but optimized for archive.go.
// It assumes that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, "/")
for i, pattern := range patterns {
negative := false
if Exclusion(pattern) {
negative = true
pattern = pattern[1:]
}
match, err := filepath.Match(pattern, file)
if err != nil {
logrus.Errorf("Error matching: %s (pattern: %s)", file, pattern)
return false, err
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
}
}
if match {
matched = !negative
}
}
if matched {
logrus.Debugf("Skipping excluded path: %s", file)
}
return matched, nil
}
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
cleanDst := filepath.Clean(dst)
if cleanSrc == cleanDst {
return 0, nil
}
sf, err := os.Open(cleanSrc)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(cleanDst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
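
A short sketch of the matching semantics described above (illustrative; import path assumed from the vendored layout): an exception pattern listed after a match re-includes the file.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils"
)

func main() {
	// docs/* excludes the file, but the later !docs/README.md
	// exception re-includes it, so Matches reports false.
	m, err := fileutils.Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // false
}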

View File

@@ -0,0 +1,357 @@
package fileutils
import (
"io/ioutil"
"os"
"path"
"testing"
)
// CopyFile with invalid src
func TestCopyFileWithInvalidSrc(t *testing.T) {
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
defer os.RemoveAll(tempFolder)
if err != nil {
t.Fatal(err)
}
bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
if err == nil {
t.Fatal("Should have fail to copy an invalid src file")
}
if bytes != 0 {
t.Fatal("Should have written 0 bytes")
}
}
// CopyFile with invalid dest
func TestCopyFileWithInvalidDest(t *testing.T) {
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
defer os.RemoveAll(tempFolder)
if err != nil {
t.Fatal(err)
}
src := path.Join(tempFolder, "file")
err = ioutil.WriteFile(src, []byte("content"), 0740)
if err != nil {
t.Fatal(err)
}
bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
if err == nil {
t.Fatal("Should have fail to copy an invalid src file")
}
if bytes != 0 {
t.Fatal("Should have written 0 bytes")
}
}
// CopyFile with same src and dest
func TestCopyFileWithSameSrcAndDest(t *testing.T) {
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
defer os.RemoveAll(tempFolder)
if err != nil {
t.Fatal(err)
}
file := path.Join(tempFolder, "file")
err = ioutil.WriteFile(file, []byte("content"), 0740)
if err != nil {
t.Fatal(err)
}
bytes, err := CopyFile(file, file)
if err != nil {
t.Fatal(err)
}
if bytes != 0 {
t.Fatal("Should have written 0 bytes as it is the same file.")
}
}
// CopyFile with same src and dest but path is different and not clean
func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
defer os.RemoveAll(tempFolder)
if err != nil {
t.Fatal(err)
}
testFolder := path.Join(tempFolder, "test")
err = os.MkdirAll(testFolder, 0740)
if err != nil {
t.Fatal(err)
}
file := path.Join(testFolder, "file")
sameFile := testFolder + "/../test/file"
err = ioutil.WriteFile(file, []byte("content"), 0740)
if err != nil {
t.Fatal(err)
}
bytes, err := CopyFile(file, sameFile)
if err != nil {
t.Fatal(err)
}
if bytes != 0 {
t.Fatal("Should have written 0 bytes as it is the same file.")
}
}
func TestCopyFile(t *testing.T) {
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
defer os.RemoveAll(tempFolder)
if err != nil {
t.Fatal(err)
}
src := path.Join(tempFolder, "src")
dest := path.Join(tempFolder, "dest")
ioutil.WriteFile(src, []byte("content"), 0777)
ioutil.WriteFile(dest, []byte("destContent"), 0777)
bytes, err := CopyFile(src, dest)
if err != nil {
t.Fatal(err)
}
if bytes != 7 {
t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes)
}
actual, err := ioutil.ReadFile(dest)
if err != nil {
t.Fatal(err)
}
if string(actual) != "content" {
t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content")
}
}
// Reading a symlink to a directory must return the directory
func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
var err error
if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
t.Errorf("failed to create directory: %s", err)
}
if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
t.Errorf("failed to create symlink: %s", err)
}
var path string
if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
t.Fatalf("failed to read symlink to directory: %s", err)
}
if path != "/tmp/testReadSymlinkToExistingDirectory" {
t.Fatalf("symlink returned unexpected directory: %s", path)
}
if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
t.Errorf("failed to remove temporary directory: %s", err)
}
if err = os.Remove("/tmp/dirLinkTest"); err != nil {
t.Errorf("failed to remove symlink: %s", err)
}
}
// Reading a non-existing symlink must fail
func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
var path string
var err error
if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
t.Fatalf("error expected for non-existing symlink")
}
if path != "" {
t.Fatalf("expected empty path, but '%s' was returned", path)
}
}
// Reading a symlink to a file must fail
func TestReadSymlinkedDirectoryToFile(t *testing.T) {
var err error
var file *os.File
if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
t.Fatalf("failed to create file: %s", err)
}
file.Close()
if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
t.Errorf("failed to create symlink: %s", err)
}
var path string
if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
}
if path != "" {
t.Fatalf("path should've been empty: %s", path)
}
if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
t.Errorf("failed to remove file: %s", err)
}
if err = os.Remove("/tmp/fileLinkTest"); err != nil {
t.Errorf("failed to remove symlink: %s", err)
}
}
func TestWildcardMatches(t *testing.T) {
match, _ := Matches("fileutils.go", []string{"*"})
if match != true {
t.Errorf("failed to get a wildcard match, got %v", match)
}
}
// A simple pattern match should return true.
func TestPatternMatches(t *testing.T) {
match, _ := Matches("fileutils.go", []string{"*.go"})
if match != true {
t.Errorf("failed to get a match, got %v", match)
}
}
// An exclusion followed by an inclusion should return true.
func TestExclusionPatternMatchesPatternBefore(t *testing.T) {
match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"})
if match != true {
t.Errorf("failed to get true match on exclusion pattern, got %v", match)
}
}
// A folder pattern followed by an exception should return false.
func TestPatternMatchesFolderExclusions(t *testing.T) {
match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
if match != false {
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
}
}
// A folder pattern followed by an exception should return false.
func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
if match != false {
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
}
}
// A folder pattern followed by an exception should return false.
func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
if match != false {
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
}
}
// A pattern followed by an exclusion should return false.
func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
if match != false {
t.Errorf("failed to get false match on exclusion pattern, got %v", match)
}
}
// A filename evaluating to . should return false.
func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
match, _ := Matches(".", []string{"*.go"})
if match != false {
t.Errorf("failed to get false match on ., got %v", match)
}
}
// A single ! pattern should return an error.
func TestSingleExclamationError(t *testing.T) {
_, err := Matches("fileutils.go", []string{"!"})
if err == nil {
t.Errorf("failed to get an error for a single exclamation point, got %v", err)
}
}
// A string preceded with a ! should return true from Exclusion.
func TestExclusion(t *testing.T) {
exclusion := Exclusion("!")
if !exclusion {
t.Errorf("failed to get true for a single !, got %v", exclusion)
}
}
// Matches with no patterns
func TestMatchesWithNoPatterns(t *testing.T) {
matches, err := Matches("/any/path/there", []string{})
if err != nil {
t.Fatal(err)
}
if matches {
t.Fatalf("Should not have match anything")
}
}
// Matches with malformed patterns
func TestMatchesWithMalformedPatterns(t *testing.T) {
matches, err := Matches("/any/path/there", []string{"["})
if err == nil {
t.Fatal("Should have failed because of a malformed syntax in the pattern")
}
if matches {
t.Fatalf("Should not have match anything")
}
}
// An empty string should return true from Empty.
func TestEmpty(t *testing.T) {
empty := Empty("")
if !empty {
t.Errorf("failed to get true for an empty string, got %v", empty)
}
}
func TestCleanPatterns(t *testing.T) {
cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
if len(cleaned) != 2 {
t.Errorf("expected 2 element slice, got %v", len(cleaned))
}
}
func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
if len(cleaned) != 2 {
t.Errorf("expected 2 element slice, got %v", len(cleaned))
}
}
func TestCleanPatternsExceptionFlag(t *testing.T) {
_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
if !exceptions {
t.Errorf("expected exceptions to be true, got %v", exceptions)
}
}
func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
_, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"})
if !exceptions {
t.Errorf("expected exceptions to be true, got %v", exceptions)
}
}
func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "})
if !exceptions {
t.Errorf("expected exceptions to be true, got %v", exceptions)
}
}
func TestCleanPatternsErrorSingleException(t *testing.T) {
_, _, _, err := CleanPatterns([]string{"!"})
if err == nil {
t.Errorf("expected error on single exclamation point, got %v", err)
}
}
func TestCleanPatternsFolderSplit(t *testing.T) {
_, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
if dirs[0][0] != "docs" {
t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1])
}
if dirs[0][1] != "config" {
t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1])
}
}

View File

@@ -0,0 +1,227 @@
package ioutils
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"math/big"
"sync"
"time"
)
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
return r.closer()
}
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
return &readCloserWrapper{
Reader: r,
closer: closer,
}
}
type readerErrWrapper struct {
reader io.Reader
closer func()
}
func (r *readerErrWrapper) Read(p []byte) (int, error) {
n, err := r.reader.Read(p)
if err != nil {
r.closer()
}
return n, err
}
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
return &readerErrWrapper{
reader: r,
closer: closer,
}
}
// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
drainBuf []byte
reuseBuf []byte
maxReuse int64
resetTimeout time.Duration
bufLenResetThreshold int64
maxReadDataReset int64
}
func NewBufReader(r io.Reader) *bufReader {
var timeout int
if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
timeout = int(randVal.Int64()) + 180
} else {
timeout = 300
}
reader := &bufReader{
buf: &bytes.Buffer{},
drainBuf: make([]byte, 1024),
reuseBuf: make([]byte, 4096),
maxReuse: 1000,
resetTimeout: time.Second * time.Duration(timeout),
bufLenResetThreshold: 100 * 1024,
maxReadDataReset: 10 * 1024 * 1024,
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
reader := &bufReader{
buf: buffer,
drainBuf: drainBuffer,
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func (r *bufReader) drain() {
var (
duration time.Duration
lastReset time.Time
now time.Time
reset bool
bufLen int64
dataSinceReset int64
maxBufLen int64
reuseBufLen int64
reuseCount int64
)
reuseBufLen = int64(len(r.reuseBuf))
lastReset = time.Now()
for {
n, err := r.reader.Read(r.drainBuf)
dataSinceReset += int64(n)
r.Lock()
bufLen = int64(r.buf.Len())
if bufLen > maxBufLen {
maxBufLen = bufLen
}
// Avoid unbounded growth of the buffer over time.
// This has been discovered to be the only non-intrusive
// solution to the unbounded growth of the buffer.
// Alternative solutions such as compression, multiple
// buffers, channels and other similar pieces of code
// were reducing throughput, overall Docker performance
// or simply crashed Docker.
// This solution releases the buffer when specific
// conditions are met to avoid the continuous resizing
// of the buffer for long lived containers.
//
// Move data to the front of the buffer if it's
// smaller than what reuseBuf can store
if bufLen > 0 && reuseBufLen >= bufLen {
n, _ := r.buf.Read(r.reuseBuf)
r.buf.Write(r.reuseBuf[0:n])
// Take action if the buffer has been reused too many
// times and if there's data in the buffer.
// The timeout is also used as means to avoid doing
// these operations more often or less often than
// required.
// The various conditions try to detect heavy activity
// in the buffer which might be indicators of heavy
// growth of the buffer.
} else if reuseCount >= r.maxReuse && bufLen > 0 {
now = time.Now()
duration = now.Sub(lastReset)
timeoutReached := duration >= r.resetTimeout
// The timeout has been reached and the
// buffered data couldn't be moved to the front
// of the buffer, so the buffer gets reset.
if timeoutReached && bufLen > reuseBufLen {
reset = true
}
// The amount of buffered data is too high now,
// reset the buffer.
if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
reset = true
}
// Reset the buffer if a certain amount of
// data has gone through the buffer since the
// last reset.
if timeoutReached && dataSinceReset >= r.maxReadDataReset {
reset = true
}
// The buffered data is moved to a fresh buffer,
// swap the old buffer with the new one and
// reset all counters.
if reset {
newbuf := &bytes.Buffer{}
newbuf.ReadFrom(r.buf)
r.buf = newbuf
lastReset = now
reset = false
dataSinceReset = 0
maxBufLen = 0
reuseCount = 0
}
}
if err != nil {
r.err = err
} else {
r.buf.Write(r.drainBuf[0:n])
}
reuseCount++
r.wait.Signal()
r.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.Lock()
defer r.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
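
A minimal sketch of the pre-emptive buffering in action (illustrative; import path assumed): the drain goroutine keeps consuming the pipe, so writers are never blocked on a slow reader.

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/ioutils"
)

func main() {
	r, w := io.Pipe()
	br := ioutils.NewBufReader(r)
	done := make(chan struct{})
	go func() {
		// These writes to an unbuffered pipe complete even though
		// nobody has called br.Read yet: the drain goroutine is
		// already buffering everything coming out of the pipe.
		w.Write([]byte("hello world"))
		w.Close()
		done <- struct{}{}
	}()
	<-done // all writes finished before we start reading
	out, err := ioutil.ReadAll(br)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello world
}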

View File

@@ -0,0 +1,217 @@
package ioutils
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
)
// Implement io.Reader
type errorReader struct{}
func (r *errorReader) Read(p []byte) (int, error) {
return 0, fmt.Errorf("Error reader always fail.")
}
func TestReadCloserWrapperClose(t *testing.T) {
reader := strings.NewReader("A string reader")
wrapper := NewReadCloserWrapper(reader, func() error {
return fmt.Errorf("This will be called when closing")
})
err := wrapper.Close()
if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
}
}
func TestReaderErrWrapperReadOnError(t *testing.T) {
called := false
reader := &errorReader{}
wrapper := NewReaderErrWrapper(reader, func() {
called = true
})
_, err := wrapper.Read([]byte{})
if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
t.Fatalf("readErrWrapper should returned an error")
}
if !called {
t.Fatalf("readErrWrapper should have call the anonymous function on failure")
}
}
func TestReaderErrWrapperRead(t *testing.T) {
called := false
reader := strings.NewReader("a string reader.")
wrapper := NewReaderErrWrapper(reader, func() {
called = true // Should not be called
})
// Read 20 bytes (should be ok with the string above)
num, err := wrapper.Read(make([]byte, 20))
if err != nil {
t.Fatal(err)
}
if num != 16 {
t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
}
}
func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
reader, writer := io.Pipe()
drainBuffer := make([]byte, 1024)
buffer := bytes.Buffer{}
bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
// Write everything down to a Pipe
// Usually, a pipe should block but because of the buffered reader,
// the writes will go through
done := make(chan bool)
go func() {
writer.Write([]byte("hello world"))
writer.Close()
done <- true
}()
// Drain the reader *after* everything has been written, just to verify
// it is indeed buffering
<-done
output, err := ioutil.ReadAll(bufreader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(output, []byte("hello world")) {
t.Error(string(output))
}
}
func TestBufReader(t *testing.T) {
reader, writer := io.Pipe()
bufreader := NewBufReader(reader)
// Write everything down to a Pipe
// Usually, a pipe should block but because of the buffered reader,
// the writes will go through
done := make(chan bool)
go func() {
writer.Write([]byte("hello world"))
writer.Close()
done <- true
}()
// Drain the reader *after* everything has been written, just to verify
// it is indeed buffering
<-done
output, err := ioutil.ReadAll(bufreader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(output, []byte("hello world")) {
t.Error(string(output))
}
}
func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
reader := strings.NewReader("buffer")
bufreader := NewBufReader(reader)
if err := bufreader.Close(); err != nil {
t.Fatal(err)
}
}
// implements io.ReadCloser
type simpleReaderCloser struct{}
func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
return 0, nil
}
func (r *simpleReaderCloser) Close() error {
return nil
}
func TestBufReaderCloseWithReaderCloser(t *testing.T) {
reader := &simpleReaderCloser{}
bufreader := NewBufReader(reader)
err := bufreader.Close()
if err != nil {
t.Fatal(err)
}
}
func TestHashData(t *testing.T) {
reader := strings.NewReader("hash-me")
actual, err := HashData(reader)
if err != nil {
t.Fatal(err)
}
expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
if actual != expected {
t.Fatalf("Expecting %s, got %s", expected, actual)
}
}
type repeatedReader struct {
readCount int
maxReads int
data []byte
}
func newRepeatedReader(max int, data []byte) *repeatedReader {
return &repeatedReader{0, max, data}
}
func (r *repeatedReader) Read(p []byte) (int, error) {
if r.readCount >= r.maxReads {
return 0, io.EOF
}
r.readCount++
n := copy(p, r.data)
return n, nil
}
func testWithData(data []byte, reads int) {
reader := newRepeatedReader(reads, data)
bufReader := NewBufReader(reader)
io.Copy(ioutil.Discard, bufReader)
}
func Benchmark1M10BytesReads(b *testing.B) {
reads := 1000000
readSize := int64(10)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}
func Benchmark1M1024BytesReads(b *testing.B) {
reads := 1000000
readSize := int64(1024)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}
func Benchmark10k32KBytesReads(b *testing.B) {
reads := 10000
readSize := int64(32 * 1024)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}

View File

@@ -0,0 +1,47 @@
package ioutils
import (
"io"
"net/http"
"sync"
)
type WriteFlusher struct {
sync.Mutex
w io.Writer
flusher http.Flusher
flushed bool
}
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.Lock()
defer wf.Unlock()
n, err = wf.w.Write(b)
wf.flushed = true
wf.flusher.Flush()
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.Lock()
defer wf.Unlock()
wf.flushed = true
wf.flusher.Flush()
}
func (wf *WriteFlusher) Flushed() bool {
wf.Lock()
defer wf.Unlock()
return wf.flushed
}
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
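
A sketch of WriteFlusher in an HTTP handler (illustrative; the route and port are made up): when the ResponseWriter implements http.Flusher, every Write is pushed to the client immediately.

package main

import (
	"net/http"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
		wf := ioutils.NewWriteFlusher(w)
		// Each chunk reaches the client as soon as it is written,
		// instead of sitting in the response buffer.
		wf.Write([]byte("chunk 1\n"))
		wf.Write([]byte("chunk 2\n"))
	})
	http.ListenAndServe(":8080", nil)
}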

View File

@@ -0,0 +1,60 @@
package ioutils
import "io"
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type NopFlusher struct{}
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
io.Writer
closer func() error
}
func (r *writeCloserWrapper) Close() error {
return r.closer()
}
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
closer: closer,
}
}
// Wrap a concrete io.Writer and hold a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return value is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
Count int64
Writer io.Writer
}
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,
}
}
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
count, err = wc.Writer.Write(p)
wc.Count += int64(count)
return
}
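
A sketch of the json.Encoder use case mentioned above (illustrative; import path assumed): Encode only returns an error, so the counter is what reveals how many bytes went out.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/ioutils"
)

func main() {
	wc := ioutils.NewWriteCounter(ioutil.Discard)
	enc := json.NewEncoder(wc)
	if err := enc.Encode(map[string]string{"hello": "world"}); err != nil {
		panic(err)
	}
	// Encode masked the byte count; the counter recorded it.
	fmt.Println(wc.Count) // length of the JSON document plus trailing newline
}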

View File

@@ -0,0 +1,65 @@
package ioutils
import (
"bytes"
"strings"
"testing"
)
func TestWriteCloserWrapperClose(t *testing.T) {
called := false
writer := bytes.NewBuffer([]byte{})
wrapper := NewWriteCloserWrapper(writer, func() error {
called = true
return nil
})
if err := wrapper.Close(); err != nil {
t.Fatal(err)
}
if !called {
t.Fatalf("writeCloserWrapper should have call the anonymous function.")
}
}
func TestNopWriteCloser(t *testing.T) {
writer := bytes.NewBuffer([]byte{})
wrapper := NopWriteCloser(writer)
if err := wrapper.Close(); err != nil {
t.Fatal("NopWriteCloser always return nil on Close.")
}
}
func TestNopWriter(t *testing.T) {
nw := &NopWriter{}
l, err := nw.Write([]byte{'c'})
if err != nil {
t.Fatal(err)
}
if l != 1 {
t.Fatalf("Expected 1 got %d", l)
}
}
func TestWriteCounter(t *testing.T) {
dummy1 := "This is a dummy string."
dummy2 := "This is another dummy string."
totalLength := int64(len(dummy1) + len(dummy2))
reader1 := strings.NewReader(dummy1)
reader2 := strings.NewReader(dummy2)
var buffer bytes.Buffer
wc := NewWriteCounter(&buffer)
reader1.WriteTo(wc)
reader2.WriteTo(wc)
if wc.Count != totalLength {
t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
}
if buffer.String() != dummy1+dummy2 {
t.Error("Wrong message written")
}
}

View File

@@ -0,0 +1,109 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools
import (
"bufio"
"io"
"sync"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/ioutils"
)
var (
// Pool which returns bufio.Reader with a 32K buffer
BufioReader32KPool *BufioReaderPool
// Pool which returns bufio.Writer with a 32K buffer
BufioWriter32KPool *BufioWriterPool
)
const buffer32K = 32 * 1024
type BufioReaderPool struct {
pool sync.Pool
}
func init() {
BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
}
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
pool := sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
}
// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
buf := bufPool.pool.Get().(*bufio.Reader)
buf.Reset(r)
return buf
}
// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
b.Reset(nil)
bufPool.pool.Put(b)
}
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
return ioutils.NewReadCloserWrapper(r, func() error {
if readCloser, ok := r.(io.ReadCloser); ok {
readCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
type BufioWriterPool struct {
pool sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
pool := sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}
}
// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
buf := bufPool.pool.Get().(*bufio.Writer)
buf.Reset(w)
return buf
}
// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
b.Reset(nil)
bufPool.pool.Put(b)
}
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
return ioutils.NewWriteCloserWrapper(w, func() error {
buf.Flush()
if writeCloser, ok := w.(io.WriteCloser); ok {
writeCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
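
A caller's-eye sketch of the reader pool (illustrative; import path assumed): Get rebinds a pooled 32K bufio.Reader onto the source, and Put resets it for reuse.

package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader("pooled read\n")
	buf := pools.BufioReader32KPool.Get(src) // 32K buffered reader over src
	defer pools.BufioReader32KPool.Put(buf)  // reset and return to the pool
	line, err := buf.ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(line)
}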

View File

@@ -0,0 +1,11 @@
package promise
// Go is a basic promise implementation: it wraps a call to a function in a
// goroutine and returns a channel which will later receive the function's
// return value.
func Go(f func() error) chan error {
ch := make(chan error, 1)
go func() {
ch <- f()
}()
return ch
}
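
A minimal usage sketch (illustrative; import path assumed): kick the work off, do something else, then block on the channel for the error.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/promise"
)

func main() {
	errc := promise.Go(func() error {
		// ... some potentially slow work ...
		return nil
	})
	// Do other things here, then collect the result.
	if err := <-errc; err != nil {
		fmt.Println("work failed:", err)
	}
}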

View File

@@ -0,0 +1,168 @@
package stdcopy
import (
"encoding/binary"
"errors"
"io"
"github.com/fsouza/go-dockerclient/vendor/github.com/Sirupsen/logrus"
)
const (
StdWriterPrefixLen = 8
StdWriterFdIndex = 0
StdWriterSizeIndex = 4
)
type StdType [StdWriterPrefixLen]byte
var (
Stdin StdType = StdType{0: 0}
Stdout StdType = StdType{0: 1}
Stderr StdType = StdType{0: 2}
)
type StdWriter struct {
io.Writer
prefix StdType
sizeBuf []byte
}
func (w *StdWriter) Write(buf []byte) (n int, err error) {
var n1, n2 int
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instanciated")
}
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
n1, err = w.Writer.Write(w.prefix[:])
if err != nil {
n = n1 - StdWriterPrefixLen
} else {
n2, err = w.Writer.Write(buf)
n = n1 + n2 - StdWriterPrefixLen
}
if n < 0 {
n = 0
}
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) *StdWriter {
return &StdWriter{
Writer: w,
prefix: t,
sizeBuf: make([]byte, 4),
}
}
var ErrInvalidStdHeader = errors.New("Unrecognized input header")
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non-nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < StdWriterPrefixLen {
logrus.Debugf("Corrupted prefix: %v", buf[:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading header: %s", er)
return 0, er
}
}
// Check the first byte to know where to write
switch buf[StdWriterFdIndex] {
case 0:
fallthrough
case 1:
// Write on stdout
out = dstout
case 2:
// Write on stderr
out = dsterr
default:
logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
return 0, ErrInvalidStdHeader
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
logrus.Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+StdWriterPrefixLen > bufLen {
logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+StdWriterPrefixLen {
logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading frame: %s", er)
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
if ew != nil {
logrus.Debugf("Error writing frame: %s", ew)
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+StdWriterPrefixLen:])
// Move the index
nr -= frameSize + StdWriterPrefixLen
}
}
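
A round-trip sketch (illustrative; import path assumed): two StdWriters mux stdout and stderr frames onto one buffer, and StdCopy demuxes them back onto separate writers.

package main

import (
	"bytes"
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))
	var out, errOut bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errOut, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(out.String())    // to stdout
	fmt.Print(errOut.String()) // to stderr
}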

View File

@@ -0,0 +1,85 @@
package stdcopy
import (
"bytes"
"io/ioutil"
"strings"
"testing"
)
func TestNewStdWriter(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
if writer == nil {
t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
}
}
func TestWriteWithUninitializedStdWriter(t *testing.T) {
writer := StdWriter{
Writer: nil,
prefix: Stdout,
sizeBuf: make([]byte, 4),
}
n, err := writer.Write([]byte("Something here"))
if n != 0 || err == nil {
t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
}
}
func TestWriteWithNilBytes(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
n, err := writer.Write(nil)
if err != nil {
t.Fatalf("Shouldn't have fail when given no data")
}
if n > 0 {
t.Fatalf("Write should have written 0 byte, but has written %d", n)
}
}
func TestWrite(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test StdWrite.Write")
n, err := writer.Write(data)
if err != nil {
t.Fatalf("Error while writing with StdWrite")
}
if n != len(data) {
t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n)
}
}
func TestStdCopyWithInvalidInputHeader(t *testing.T) {
dstOut := NewStdWriter(ioutil.Discard, Stdout)
dstErr := NewStdWriter(ioutil.Discard, Stderr)
src := strings.NewReader("Invalid input")
_, err := StdCopy(dstOut, dstErr, src)
if err == nil {
t.Fatal("StdCopy with invalid input header should fail.")
}
}
func TestStdCopyWithCorruptedPrefix(t *testing.T) {
data := []byte{0x01, 0x02, 0x03}
src := bytes.NewReader(data)
written, err := StdCopy(nil, nil, src)
if err != nil {
t.Fatalf("StdCopy should not return an error with corrupted prefix.")
}
if written != 0 {
t.Fatalf("StdCopy should have written 0, but has written %d", written)
}
}
func BenchmarkWrite(b *testing.B) {
w := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test line for testing stdwriter performance\n")
data = bytes.Repeat(data, 100)
b.SetBytes(int64(len(data)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := w.Write(data); err != nil {
b.Fatal(err)
}
}
}

View File

@@ -0,0 +1,9 @@
package system
import (
"errors"
)
var (
ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
)

View File

@@ -0,0 +1,11 @@
// +build !windows
package system
import (
"os"
)
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}

View File

@@ -0,0 +1,64 @@
// +build windows
package system
import (
"os"
"regexp"
"syscall"
)
// MkdirAll implementation that is volume path aware for Windows.
func MkdirAll(path string, perm os.FileMode) error {
if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
return nil
}
// The rest of this method is copied from os.MkdirAll and should be kept
// as-is to ensure compatibility.
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{
Op: "mkdir",
Path: path,
Err: syscall.ENOTDIR,
}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = MkdirAll(path[0:j-1], perm)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = os.Mkdir(path, perm)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}

View File

@@ -0,0 +1,19 @@
// +build !windows
package system
import (
"syscall"
)
// Lstat takes a path to a file and returns
// a system.Stat_t type pertaining to that file.
//
// It returns an error if the file does not exist.
func Lstat(path string) (*Stat_t, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View File

@@ -0,0 +1,28 @@
package system
import (
"os"
"testing"
)
// TestLstat tests Lstat for existing and non-existing files
func TestLstat(t *testing.T) {
file, invalid, _, dir := prepareFiles(t)
defer os.RemoveAll(dir)
statFile, err := Lstat(file)
if err != nil {
t.Fatal(err)
}
if statFile == nil {
t.Fatal("returned empty stat for existing file")
}
statInvalid, err := Lstat(invalid)
if err == nil {
t.Fatal("did not return error for non-existing file")
}
if statInvalid != nil {
t.Fatal("returned non-nil stat for non-existing file")
}
}

View File

@@ -0,0 +1,8 @@
// +build windows
package system
func Lstat(path string) (*Stat_t, error) {
// should not be called on cli code path
return nil, ErrNotSupportedPlatform
}

View File

@@ -0,0 +1,17 @@
package system
// MemInfo contains memory statistics of the host system.
type MemInfo struct {
// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
// kernel binary code).
MemTotal int64
// Amount of free memory.
MemFree int64
// Total amount of swap space available.
SwapTotal int64
// Amount of swap space that is currently unused.
SwapFree int64
}

View File

@@ -0,0 +1,71 @@
package system
import (
"bufio"
"errors"
"io"
"os"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/units"
)
var (
ErrMalformed = errors.New("malformed file")
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
file, err := os.Open("/proc/meminfo")
if err != nil {
return nil, err
}
defer file.Close()
return parseMemInfo(file)
}
// parseMemInfo parses the /proc/meminfo file into
// a MemInfo object given a io.Reader to the file.
//
// It returns an error if there are problems reading from the file.
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
// Expected format: ["MemTotal:", "1234", "kB"]
parts := strings.Fields(scanner.Text())
// Sanity checks: Skip malformed entries.
if len(parts) < 3 || parts[2] != "kB" {
continue
}
// Convert to bytes.
size, err := strconv.Atoi(parts[1])
if err != nil {
continue
}
bytes := int64(size) * units.KiB
switch parts[0] {
case "MemTotal:":
meminfo.MemTotal = bytes
case "MemFree:":
meminfo.MemFree = bytes
case "SwapTotal:":
meminfo.SwapTotal = bytes
case "SwapFree:":
meminfo.SwapFree = bytes
}
}
// Handle errors that may have occurred during the reading of the file.
if err := scanner.Err(); err != nil {
return nil, err
}
return meminfo, nil
}
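
A minimal caller's sketch (illustrative; import path assumed); this only works on Linux, where /proc/meminfo exists.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo() // parses /proc/meminfo into bytes
	if err != nil {
		panic(err)
	}
	fmt.Printf("mem total=%d free=%d, swap total=%d free=%d\n",
		mi.MemTotal, mi.MemFree, mi.SwapTotal, mi.SwapFree)
}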

View File

@@ -0,0 +1,38 @@
package system
import (
"strings"
"testing"
"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/units"
)
// TestMemInfo tests parseMemInfo with a static meminfo string
func TestMemInfo(t *testing.T) {
const input = `
MemTotal: 1 kB
MemFree: 2 kB
SwapTotal: 3 kB
SwapFree: 4 kB
Malformed1:
Malformed2: 1
Malformed3: 2 MB
Malformed4: X kB
`
meminfo, err := parseMemInfo(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if meminfo.MemTotal != 1*units.KiB {
t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
}
if meminfo.MemFree != 2*units.KiB {
t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
}
if meminfo.SwapTotal != 3*units.KiB {
t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
}
if meminfo.SwapFree != 4*units.KiB {
t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
}
}

View File

@@ -0,0 +1,7 @@
// +build !linux
package system
func ReadMemInfo() (*MemInfo, error) {
return nil, ErrNotSupportedPlatform
}

View File

@@ -0,0 +1,20 @@
// +build !windows
package system
import (
"syscall"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev
func Mknod(path string, mode uint32, dev int) error {
return syscall.Mknod(path, mode, dev)
}
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
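
A worked example of the encoding described in the comment above (illustrative; import path assumed): /dev/null is conventionally major 1, minor 3, which packs to 0x103.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)

func main() {
	// minor 3: low 8 bits -> 0x03; major 1: next 12 bits -> 0x100;
	// the high minor bits are zero here, so the packed value is 0x103.
	fmt.Printf("%#x\n", system.Mkdev(1, 3)) // 0x103
}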

View File

@@ -0,0 +1,12 @@
// +build windows
package system
func Mknod(path string, mode uint32, dev int) error {
// should not be called on cli code path
return ErrNotSupportedPlatform
}
func Mkdev(major int64, minor int64) uint32 {
panic("Mkdev not implemented on windows, should not be called on cli code")
}

View File

@@ -0,0 +1,44 @@
package system
import (
"syscall"
)
// Stat_t type contains status of a file. It contains metadata
// like permissions, owner, group, size, and modification time of a file.
type Stat_t struct {
mode uint32
uid uint32
gid uint32
rdev uint64
size int64
mtim syscall.Timespec
}
func (s Stat_t) Mode() uint32 {
return s.mode
}
func (s Stat_t) Uid() uint32 {
return s.uid
}
func (s Stat_t) Gid() uint32 {
return s.gid
}
func (s Stat_t) Rdev() uint64 {
return s.rdev
}
func (s Stat_t) Size() int64 {
return s.size
}
func (s Stat_t) Mtim() syscall.Timespec {
return s.mtim
}
func (s Stat_t) GetLastModification() syscall.Timespec {
return s.Mtim()
}

View File

@@ -0,0 +1,27 @@
package system
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
return &Stat_t{size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: s.Rdev,
mtim: s.Mtim}, nil
}
// Stat takes a path to a file and returns
// a system.Stat_t type pertaining to that file.
//
// It returns an error if the file does not exist.
func Stat(path string) (*Stat_t, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View File

@@ -0,0 +1,37 @@
package system
import (
"os"
"syscall"
"testing"
)
// TestFromStatT tests fromStatT for a tempfile
func TestFromStatT(t *testing.T) {
file, _, _, dir := prepareFiles(t)
defer os.RemoveAll(dir)
stat := &syscall.Stat_t{}
if err := syscall.Lstat(file, stat); err != nil {
t.Fatal(err)
}
s, err := fromStatT(stat)
if err != nil {
t.Fatal(err)
}
if stat.Mode != s.Mode() {
t.Fatal("got invalid mode")
}
if stat.Uid != s.Uid() {
t.Fatal("got invalid uid")
}
if stat.Gid != s.Gid() {
t.Fatal("got invalid gid")
}
if stat.Rdev != s.Rdev() {
t.Fatal("got invalid rdev")
}
if stat.Mtim != s.Mtim() {
t.Fatal("got invalid mtim")
}
}

View File

@@ -0,0 +1,17 @@
// +build !linux,!windows
package system
import (
"syscall"
)
// fromStatT creates a system.Stat_t type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
return &Stat_t{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}

View File

@@ -0,0 +1,17 @@
// +build windows
package system
import (
"errors"
"syscall"
)
func fromStatT(s *syscall.Win32FileAttributeData) (*Stat_t, error) {
return nil, errors.New("fromStatT should not be called on windows path")
}
func Stat(path string) (*Stat_t, error) {
// should not be called on cli code path
return nil, ErrNotSupportedPlatform
}

View File

@@ -0,0 +1,11 @@
// +build !windows
package system
import (
"syscall"
)
func Umask(newmask int) (oldmask int, err error) {
return syscall.Umask(newmask), nil
}

View File

@@ -0,0 +1,8 @@
// +build windows
package system
func Umask(newmask int) (oldmask int, err error) {
// should not be called on cli code path
return 0, ErrNotSupportedPlatform
}

View File

@@ -0,0 +1,11 @@
package system
import "syscall"
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@@ -0,0 +1,24 @@
package system
import (
"syscall"
"unsafe"
)
func LUtimesNano(path string, ts []syscall.Timespec) error {
var _path *byte
_path, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
return err
}
return nil
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@@ -0,0 +1,28 @@
package system
import (
"syscall"
"unsafe"
)
func LUtimesNano(path string, ts []syscall.Timespec) error {
// These are not currently available in syscall
AT_FDCWD := -100
AT_SYMLINK_NOFOLLOW := 0x100
var _path *byte
_path, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
return err
}
return nil
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return syscall.UtimesNano(path, ts)
}

View File

@@ -0,0 +1,66 @@
package system
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
)
// prepareFiles creates files for testing in the temp directory
func prepareFiles(t *testing.T) (string, string, string, string) {
dir, err := ioutil.TempDir("", "docker-system-test")
if err != nil {
t.Fatal(err)
}
file := filepath.Join(dir, "exist")
if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
t.Fatal(err)
}
invalid := filepath.Join(dir, "doesnt-exist")
symlink := filepath.Join(dir, "symlink")
if err := os.Symlink(file, symlink); err != nil {
t.Fatal(err)
}
return file, invalid, symlink, dir
}
func TestLUtimesNano(t *testing.T) {
file, invalid, symlink, dir := prepareFiles(t)
defer os.RemoveAll(dir)
before, err := os.Stat(file)
if err != nil {
t.Fatal(err)
}
ts := []syscall.Timespec{{0, 0}, {0, 0}}
if err := LUtimesNano(symlink, ts); err != nil {
t.Fatal(err)
}
symlinkInfo, err := os.Lstat(symlink)
if err != nil {
t.Fatal(err)
}
if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
t.Fatal("The modification time of the symlink should be different")
}
fileInfo, err := os.Stat(file)
if err != nil {
t.Fatal(err)
}
if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
t.Fatal("The modification time of the file should be same")
}
if err := LUtimesNano(invalid, ts); err == nil {
t.Fatal("Doesn't return an error on a non-existing file")
}
}

View File

@@ -0,0 +1,13 @@
// +build !linux,!freebsd,!darwin
package system
import "syscall"
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
func UtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}

View File

@@ -0,0 +1,59 @@
package system
import (
"syscall"
"unsafe"
)
// Returns a nil slice and nil error if the xattr is not set
func Lgetxattr(path string, attr string) ([]byte, error) {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return nil, err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return nil, err
}
dest := make([]byte, 128)
destBytes := unsafe.Pointer(&dest[0])
sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
if errno == syscall.ENODATA {
return nil, nil
}
if errno == syscall.ERANGE {
dest = make([]byte, sz)
destBytes := unsafe.Pointer(&dest[0])
sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
}
if errno != 0 {
return nil, errno
}
return dest[:sz], nil
}
var _zero uintptr
func Lsetxattr(path string, attr string, data []byte, flags int) error {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return err
}
var dataBytes unsafe.Pointer
if len(data) > 0 {
dataBytes = unsafe.Pointer(&data[0])
} else {
dataBytes = unsafe.Pointer(&_zero)
}
_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
if errno != 0 {
return errno
}
return nil
}
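
A Linux-only usage sketch (illustrative; the path and attribute name are made up, and the filesystem must support user xattrs):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system"
)

func main() {
	path := "/tmp/xattr-demo" // assumed to exist on an xattr-capable filesystem
	if err := system.Lsetxattr(path, "user.demo", []byte("value"), 0); err != nil {
		panic(err)
	}
	v, err := system.Lgetxattr(path, "user.demo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v)) // "value"; a nil slice would mean the attr is unset
}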

View File

@@ -0,0 +1,11 @@
// +build !linux
package system
func Lgetxattr(path string, attr string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
}
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return ErrNotSupportedPlatform
}

Some files were not shown because too many files have changed in this diff.