diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 1d6c9660670..56547d81860 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -39,6 +39,11 @@
"Comment": "release-96",
"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
},
+ {
+ "ImportPath": "github.com/Sirupsen/logrus",
+ "Comment": "v0.6.2-10-g51fe59a",
+ "Rev": "51fe59aca108dc5680109e7b2051cbdcfa5a253c"
+ },
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v0.2.0-rc1-120-g23142f6",
@@ -48,6 +53,46 @@
"ImportPath": "github.com/davecgh/go-spew/spew",
"Rev": "83f84dc933714d51504ceed59f43ead21d096fe7"
},
+ {
+ "ImportPath": "github.com/docker/docker/pkg/archive",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/fileutils",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/ioutils",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/pools",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/promise",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/system",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/units",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
+ "Comment": "v1.4.1-108-g364720b",
+ "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ },
{
"ImportPath": "github.com/elazarl/go-bindata-assetfs",
"Rev": "ae4665cf2d188c65764c73fe4af5378acc549510"
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
new file mode 100644
index 00000000000..66be63a0057
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
@@ -0,0 +1 @@
+logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
new file mode 100644
index 00000000000..c3af3ce27cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - 1.2
+ - 1.3
+ - tip
+install:
+ - go get github.com/stretchr/testify
+ - go get github.com/stvp/go-udp-testing
+ - go get github.com/tobi/airbrake-go
+ - go get github.com/getsentry/raven-go
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 00000000000..f090cb42f37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 00000000000..b6aa84c987f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,352 @@
+# Logrus
+[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg)](https://travis-ci.org/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0); the core API is unlikely to change much, but please version
+control your Logrus to make sure you aren't fetching latest `master` on every
+build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[l2met](http://r.32k.io/l2met-introduction) format:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
+time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
+time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
+time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
+time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(&logrus_airbrake.AirbrakeHook{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
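+
+As a small sketch of the difference (assuming `user` and `err` are in scope):
+
+```go
+// Harder to filter and aggregate downstream:
+log.Errorf("Failed to fetch user %s: %v", user, err)
+
+// The same information as discoverable fields:
+log.WithFields(log.Fields{
+  "user":  user,
+  "error": err,
+}).Error("Failed to fetch user")
+```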
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`; to send info to
+StatsD; or to log to multiple places simultaneously, e.g. syslog.
+
+```go
+// Not the real implementation of the Airbrake hook. Just a simple sample.
+import (
+  log "github.com/Sirupsen/logrus"
+  "github.com/tobi/airbrake-go"
+)
+
+func init() {
+ log.AddHook(new(AirbrakeHook))
+}
+
+type AirbrakeHook struct{}
+
+// `Fire()` takes the entry that the hook is fired for. `entry.Data` contains
+// the fields for the entry. See the Fields section of the README.
+func (hook *AirbrakeHook) Fire(entry *log.Entry) error {
+ err := airbrake.Notify(entry.Data["error"].(error))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Info("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+// `Levels()` returns a slice of `Levels` the hook is fired for.
+func (hook *AirbrakeHook) Levels() []log.Level {
+ return []log.Level{
+ log.ErrorLevel,
+ log.FatalLevel,
+ log.PanicLevel,
+ }
+}
+```
+
+Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
+ Send errors to an exception tracking service compatible with the Airbrake API.
+ Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+
+* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
+ Send errors to the Papertrail hosted logging service via UDP.
+
+* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
+  Send errors to a remote syslog server.
+ Uses standard library `log/syslog` behind the scenes.
+
+* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
+  Send errors to a channel in HipChat.
+
+* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
+ Send logs to Loggly (https://www.loggly.com/)
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
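+
+For example, a minimal sketch that keys the level off a hypothetical `DEBUG`
+environment variable:
+
+```go
+import (
+  "os"
+
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // DEBUG is an illustrative variable name, not a logrus convention.
+  if os.Getenv("DEBUG") == "1" {
+    log.SetLevel(log.DebugLevel)
+  }
+}
+```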
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
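+
+Putting it together, a sketch of a single call and the JSON output it would
+produce (timestamp abbreviated):
+
+```go
+log.WithField("event", "signup").Info("Failed to send event.")
+// => {"event":"signup","level":"info","msg":"Failed to send event.","time":"..."}
+```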
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force non-colored output even if there is a TTY, set
+    the `DisableColors` field to `true` (see the sketch below).
+* `logrus.JSONFormatter`. Logs fields as JSON.
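+
+For example, a sketch of forcing colors off regardless of TTY (assuming the
+`log "github.com/Sirupsen/logrus"` import alias used in the examples above):
+
+```go
+log.SetFormatter(&log.TextFormatter{DisableColors: true})
+```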
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+// Register it with: log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+files. It should not be a feature of the application-level logger.
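+
+If you do log to a file, one workable pattern (a sketch, assuming a
+logrotate-style setup that sends `SIGHUP` after rotating a hypothetical
+`app.log`) is to reopen the output file when signaled:
+
+```go
+package main
+
+import (
+  "os"
+  "os/signal"
+  "syscall"
+
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+  if err != nil {
+    log.Fatal(err)
+  }
+  log.SetOutput(f)
+
+  hup := make(chan os.Signal, 1)
+  signal.Notify(hup, syscall.SIGHUP)
+  go func() {
+    for range hup {
+      // Reopen so we write to the fresh file, not the rotated-away one.
+      nf, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+      if err != nil {
+        log.WithField("error", err).Error("could not reopen log file")
+        continue
+      }
+      log.SetOutput(nf)
+      f.Close()
+      f = nf
+    }
+  }()
+
+  // ... application work that logs via the package-level logger ...
+}
+```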
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 00000000000..e164eecb5f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,248 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
new file mode 100644
index 00000000000..98717df4901
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 00000000000..a62ba45de50
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 00000000000..42e7a4c9825
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/tobi/airbrake-go"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
+}
+
+func main() {
+ airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
+ airbrake.ApiKey = "whatever"
+ airbrake.Environment = "production"
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 00000000000..d0871244814
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,182 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 00000000000..038ce9fd297
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,44 @@
+package logrus
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// prefixFieldClashes prevents silently overwriting the `time`, `msg` and
+// `level` fields when dumping an entry. Without this code, calling:
+//
+//   logrus.WithField("level", 1).Info("hello")
+//
+// would silently drop the user-provided level. With it, the entry is
+// logged as:
+//
+//   {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's
+// to avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 00000000000..77989da6297
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,88 @@
+package logrus
+
+import (
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
new file mode 100644
index 00000000000..13f34cb6f81
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 00000000000..0da2b3653f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that hooks are not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your hook's work is slow and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
new file mode 100644
index 00000000000..880d21ecdc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
@@ -0,0 +1,54 @@
+package logrus_airbrake
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/tobi/airbrake-go"
+)
+
+// AirbrakeHook sends exceptions to an exception-tracking service compatible
+// with the Airbrake API. Before using this hook, you must set:
+// * airbrake.Endpoint
+// * airbrake.ApiKey
+// * airbrake.Environment (only sends exceptions when set to "production")
+//
+// Entries that trigger an Error, Fatal or Panic should include an "error"
+// field of type `error`, which is sent to Airbrake.
+type AirbrakeHook struct{}
+
+func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
+ if entry.Data["error"] == nil {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
+ return nil
+ }
+
+ err, ok := entry.Data["error"].(error)
+ if !ok {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
+ return nil
+ }
+
+ airErr := airbrake.Notify(err)
+ if airErr != nil {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ "error": airErr,
+ }).Warn("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+func (hook *AirbrakeHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
new file mode 100644
index 00000000000..ae61e9229ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
@@ -0,0 +1,28 @@
+# Papertrail Hook for Logrus
+
+[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
+
+In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
+
+## Usage
+
+You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
+
+For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/papertrail"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
new file mode 100644
index 00000000000..12c56f293d0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
@@ -0,0 +1,54 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ format = "Jan 2 15:04:05"
+)
+
+// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
+type PapertrailHook struct {
+ Host string
+ Port int
+ AppName string
+ UDPConn net.Conn
+}
+
+// NewPapertrailHook creates a hook to be added to an instance of logger.
+func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
+ conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
+ return &PapertrailHook{host, port, appName, conn}, err
+}
+
+// Fire is called when a log event is fired.
+func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
+ date := time.Now().Format(format)
+ payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Level, entry.Message)
+
+ bytesWritten, err := hook.UDPConn.Write([]byte(payload))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
+ return err
+ }
+
+ return nil
+}
+
+// Levels returns the logging levels the hook fires on.
+func (hook *PapertrailHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
new file mode 100644
index 00000000000..96318d00304
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
@@ -0,0 +1,26 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stvp/go-udp-testing"
+)
+
+func TestWritingToUDP(t *testing.T) {
+ port := 16661
+ udp.SetAddr(fmt.Sprintf(":%d", port))
+
+ hook, err := NewPapertrailHook("localhost", port, "test")
+ if err != nil {
+ t.Errorf("Unable to connect to local UDP server.")
+ }
+
+ log := logrus.New()
+ log.Hooks.Add(hook)
+
+ udp.ShouldReceive(t, "foo", func() {
+ log.Info("foo")
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
new file mode 100644
index 00000000000..a409f3b04dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
@@ -0,0 +1,61 @@
+# Sentry Hook for Logrus
+
+[Sentry](https://getsentry.com) provides both self-hosted and hosted
+solutions for exception tracking.
+Both client and server are
+[open source](https://github.com/getsentry/sentry).
+
+## Usage
+
+Every sentry application defined on the server gets a different
+[DSN](https://www.getsentry.com/docs/). In the example below replace
+`YOUR_DSN` with the one created for your application.
+
+```go
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/sentry"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ })
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+## Special fields
+
+Some logrus fields have a special meaning in this hook; these are `server_name`
+and `logger`. When logs are sent to Sentry, these fields are treated differently:
+- `server_name` (also known as hostname) is the name of the server which
+  is logging the event (e.g. hostname.example.com)
+- `logger` is the part of the application which is logging the event;
+  in Go this usually means setting it to the name of the package.
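+
+For example (a sketch; the field values are placeholders), an entry logged as:
+
+```go
+log.WithFields(logrus.Fields{
+  "server_name": "hostname.example.com",
+  "logger":      "myapp.billing",
+}).Error("failed to charge card")
+```
+
+arrives in Sentry with its `ServerName` and `Logger` attributes set, and those
+two keys removed from the extra data.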
+
+## Timeout
+
+`Timeout` is the time the sentry hook will wait for a response from the sentry
+server. If this time elapses with no response from the server, an error is
+returned. If `Timeout` is set to 0, the SentryHook will not wait for a reply
+and will assume correct delivery.
+
+The SentryHook has a default timeout of `100 milliseconds` when created
+with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
+
+```go
+hook, _ := logrus_sentry.NewSentryHook(...)
+hook.Timeout = 20 * time.Second
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
new file mode 100644
index 00000000000..379f281c533
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
@@ -0,0 +1,100 @@
+package logrus_sentry
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+var (
+ severityMap = map[logrus.Level]raven.Severity{
+ logrus.DebugLevel: raven.DEBUG,
+ logrus.InfoLevel: raven.INFO,
+ logrus.WarnLevel: raven.WARNING,
+ logrus.ErrorLevel: raven.ERROR,
+ logrus.FatalLevel: raven.FATAL,
+ logrus.PanicLevel: raven.FATAL,
+ }
+)
+
+func getAndDel(d logrus.Fields, key string) (string, bool) {
+ var (
+ ok bool
+ v interface{}
+ val string
+ )
+ if v, ok = d[key]; !ok {
+ return "", false
+ }
+
+ if val, ok = v.(string); !ok {
+ return "", false
+ }
+ delete(d, key)
+ return val, true
+}
+
+// SentryHook delivers logs to a sentry server.
+type SentryHook struct {
+ // Timeout sets the time to wait for a delivery error from the sentry server.
+ // If this is set to zero the server will not wait for any response and will
+ // consider the message correctly sent
+ Timeout time.Duration
+
+ client *raven.Client
+ levels []logrus.Level
+}
+
+// NewSentryHook creates a hook to be added to an instance of logger
+// and initializes the raven client.
+// This method sets the timeout to 100 milliseconds.
+func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
+ client, err := raven.NewClient(DSN, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// Called when an event should be sent to sentry
+// Special fields that sentry uses to give more information to the server
+// are extracted from entry.Data (if they are found)
+// These fields are: logger and server_name
+func (hook *SentryHook) Fire(entry *logrus.Entry) error {
+ packet := &raven.Packet{
+ Message: entry.Message,
+ Timestamp: raven.Timestamp(entry.Time),
+ Level: severityMap[entry.Level],
+ Platform: "go",
+ }
+
+ d := entry.Data
+
+ if logger, ok := getAndDel(d, "logger"); ok {
+ packet.Logger = logger
+ }
+ if serverName, ok := getAndDel(d, "server_name"); ok {
+ packet.ServerName = serverName
+ }
+ packet.Extra = map[string]interface{}(d)
+
+ _, errCh := hook.client.Capture(packet, nil)
+ timeout := hook.Timeout
+ if timeout != 0 {
+ timeoutCh := time.After(timeout)
+ select {
+ case err := <-errCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("no response from sentry server in %s", timeout)
+ }
+ }
+ return nil
+}
+
+// Levels returns the logging levels the hook fires on.
+func (hook *SentryHook) Levels() []logrus.Level {
+ return hook.levels
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
new file mode 100644
index 00000000000..45f18d17047
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
@@ -0,0 +1,97 @@
+package logrus_sentry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+const (
+ message = "error message"
+ server_name = "testserver.internal"
+ logger_name = "test.logger"
+)
+
+func getTestLogger() *logrus.Logger {
+ l := logrus.New()
+ l.Out = ioutil.Discard
+ return l
+}
+
+func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
+ pch := make(chan *raven.Packet, 1)
+ s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ d := json.NewDecoder(req.Body)
+ p := &raven.Packet{}
+ err := d.Decode(p)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ pch <- p
+ }))
+ defer s.Close()
+
+ fragments := strings.SplitN(s.URL, "://", 2)
+ dsn := fmt.Sprintf(
+ "%s://public:secret@%s/sentry/project-id",
+ fragments[0],
+ fragments[1],
+ )
+ tf(dsn, pch)
+}
+
+func TestSpecialFields(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+ logger.WithFields(logrus.Fields{
+ "server_name": server_name,
+ "logger": logger_name,
+ }).Error(message)
+
+ packet := <-pch
+ if packet.Logger != logger_name {
+ t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
+ }
+
+ if packet.ServerName != server_name {
+ t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
+ }
+ })
+}
+
+func TestSentryHandler(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+
+ logger.Error(message)
+ packet := <-pch
+ if packet.Message != message {
+ t.Errorf("message should have been %s, was %s", message, packet.Message)
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 00000000000..4dbb8e72907
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,20 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 00000000000..b6fa3746280
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,59 @@
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 00000000000..42762dc10d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 00000000000..b09227c2b5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,26 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+type JSONFormatter struct{}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data)
+ data["time"] = entry.Time.Format(time.RFC3339)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 00000000000..b392e547a7b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stdout`. You can also set this to
+  // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter`, of which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but when writing to a file it doesn't. You can easily
+ // implement your own type that satisfies the `Formatter` interface; see the
+ // `README` or the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+ // Adds a field to the log entry; note that it doesn't log until you call
+ // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+ // If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
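+
+// For illustration (field names are arbitrary): fields set on the returned
+// *Entry are carried into every subsequent log call made on it.
+//
+//   requestLogger := logger.WithFields(Fields{
+//     "request_id": "1234",
+//     "user":       "alice",
+//   })
+//   requestLogger.Info("request started")
+//   requestLogger.Warn("request is slow")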
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ NewEntry(logger).Debugf(format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ NewEntry(logger).Infof(format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ NewEntry(logger).Errorf(format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ NewEntry(logger).Fatalf(format, args...)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ NewEntry(logger).Panicf(format, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ NewEntry(logger).Debug(args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ NewEntry(logger).Error(args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ NewEntry(logger).Fatal(args...)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ NewEntry(logger).Panic(args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ NewEntry(logger).Debugln(args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ NewEntry(logger).Infoln(args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ NewEntry(logger).Errorln(args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ NewEntry(logger).Fatalln(args...)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ NewEntry(logger).Panicln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 00000000000..43ee12e90e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
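+
+// For illustration, parsing a level supplied by a user (the source of the
+// string is arbitrary):
+//
+//   lvl, err := ParseLevel("debug")
+//   if err != nil {
+//     // handle the unknown level string
+//   }
+//   logger.Level = lvl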
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
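+
+// For illustration, a library function written against StdLogger accepts
+// either a *log.Logger or a *logrus.Logger (the function is hypothetical):
+//
+//   func ListenAndLog(l StdLogger) {
+//     l.Println("server starting")
+//   }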
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
new file mode 100644
index 00000000000..53025425816
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
@@ -0,0 +1,283 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val := kvArr[1]
+ if kvArr[1][0] == '"' {
+ var err error
+ val, err = strconv.Unquote(val)
+ assert.NoError(t, err)
+ }
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1)
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ llog := logger.WithField("context", "eating raw fish")
+
+ llog.Info("looks delicious")
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded first message")
+ assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "looks delicious")
+ assert.Equal(t, fields["context"], "eating raw fish")
+
+ buffer.Reset()
+
+ llog.Warn("omg it is!")
+
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded second message")
+ assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "omg it is!")
+ assert.Equal(t, fields["context"], "eating raw fish")
+ assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100644
index 00000000000..8fe02a4aec1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 00000000000..0428ee5d52a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. It should be added in Go 1.3, at which point this file could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 00000000000..a2c0b40db61
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000000..276447bd5c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000000..2e09f6f7e31
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000000..78e78893568
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,124 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+ noQuoteNeeded *regexp.Regexp
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+ DisableColors bool
+ // Set to true to disable timestamp logging (useful when the output
+ // is redirected to a logging system already adding a timestamp)
+ DisableTimestamp bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+
+ var keys []string
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if isColored {
+ printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+// needsQuoting reports whether text is safe to emit unquoted: it returns
+// true only when every character is alphanumeric, '-' or '.', and false
+// otherwise. appendKeyValue below relies on this (inverted-sounding) sense.
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return false
+ }
+ }
+ return true
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+ switch value.(type) {
+ case string:
+ if needsQuoting(value.(string)) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ case error:
+ if needsQuoting(value.(error).Error()) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ default:
+ fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
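+
+// For illustration, with colors disabled the formatter emits one key=value
+// pair per field, quoting values that contain characters outside
+// [a-zA-Z0-9.-] (timestamp illustrative):
+//
+//   time="2014-03-10T19:57:38Z" level=info msg="A walrus appears" animal=walrus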
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 00000000000..f604f1b0026
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,33 @@
+package logrus
+
+import (
+ "bytes"
+ "errors"
+
+ "testing"
+)
+
+func TestQuoting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ checkQuoting := func(q bool, value interface{}) {
+ b, _ := tf.Format(WithField("test", value))
+ idx := bytes.Index(b, ([]byte)("test="))
+ cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ if cont != q {
+ if q {
+ t.Errorf("quoting expected for: %#v", value)
+ } else {
+ t.Errorf("quoting not expected for: %#v", value)
+ }
+ }
+ }
+
+ checkQuoting(false, "abcd")
+ checkQuoting(false, "v1.0")
+ checkQuoting(true, "/foobar")
+ checkQuoting(true, "x y")
+ checkQuoting(true, "x,y")
+ checkQuoting(false, errors.New("invalid"))
+ checkQuoting(true, errors.New("invalid argument"))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS
new file mode 100644
index 00000000000..2aac7265d23
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS
@@ -0,0 +1,2 @@
+Cristian Staretu (@unclejack)
+Tibor Vass (@tiborvass)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 00000000000..7307d9694f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 00000000000..ec45d8546dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,802 @@
+package archive
+
+import (
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/system"
+)
+
+type (
+ Archive io.ReadCloser
+ ArchiveReader io.Reader
+ Compression int
+ TarOptions struct {
+ Includes []string
+ Excludes []string
+ Compression Compression
+ NoLchown bool
+ Name string
+ }
+
+ // Archiver allows the reuse of most utility functions of this package
+ // with a pluggable Untar function.
+ Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ }
+
+ // breakoutError is used to differentiate errors related to breaking out.
+ // When testing archive breakout in the unit tests, this error is expected
+ // in order for the test to pass.
+ breakoutError error
+)
+
+var (
+ ErrNotImplemented = errors.New("Function not implemented")
+ defaultArchiver = &Archiver{Untar}
+)
+
+const (
+ Uncompressed Compression = iota
+ Bzip2
+ Gzip
+ Xz
+)
+
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ log.Debugf("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
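+
+// For illustration, sniffing a stream before choosing a decompressor (the
+// 10-byte peek mirrors DecompressStream below):
+//
+//   header := make([]byte, 10)
+//   if _, err := io.ReadFull(r, header); err == nil {
+//     if DetectCompression(header) == Gzip {
+//       // wrap the stream (re-prefixed with header) in a gzip reader
+//     }
+//   }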
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return CmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil {
+ return nil, err
+ }
+ log.Debugf("[tar autodetect] n: %v", bs)
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return readBufWrapper, nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all.
+ // This is not a problem, however, as docker currently only generates gzipped tars.
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+}
+
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ link := ""
+ if fi.Mode()&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(path); err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+
+ if fi.IsDir() && !strings.HasSuffix(name, "/") {
+ name = name + "/"
+ }
+
+ hdr.Name = name
+
+ nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ if err != nil {
+ return err
+ }
+
+ // if it's a regular file and has more than 1 link,
+ // it's hardlinked, so set the type flag accordingly
+ if fi.Mode().IsRegular() && nlink > 1 {
+ // a link should have a name that it links to,
+ // and that linked name should come first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ log.Debugf("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+ }
+
+ if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
+ return err
+ }
+
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so symlinks are handled with LUtimesNano below
+ if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ } else {
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
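+
+// For illustration, archiving a directory with gzip compression and writing
+// the stream to disk (paths are illustrative):
+//
+//   rc, err := Tar("/var/lib/app", Gzip)
+//   if err == nil {
+//     defer rc.Close()
+//     f, _ := os.Create("/tmp/app.tar.gz")
+//     defer f.Close()
+//     io.Copy(f, rc)
+//   }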
+
+func escapeName(name string) string {
+ escaped := make([]byte, 0)
+ for i, c := range []byte(name) {
+ if i == 0 && c == '/' {
+ continue
+ }
+ // all printable chars except "-" which is 0x2d
+ if (0x20 <= c && c <= 0x7E) && c != 0x2d {
+ escaped = append(escaped, c)
+ } else {
+ escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
+ }
+ }
+ return string(escaped)
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this.
+
+ if options.Includes == nil {
+ options.Includes = []string{"."}
+ }
+
+ var renamedRelFilePath string // For when tar.Options.Name is set
+ for _, include := range options.Includes {
+ filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the root path. Skip in both situations.
+ return nil
+ }
+
+ skip, err := fileutils.Matches(relFilePath, options.Excludes)
+ if err != nil {
+ log.Debugf("Error matching %s", relFilePath, err)
+ return err
+ }
+
+ if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // Rename the base resource
+ if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
+ renamedRelFilePath = relFilePath
+ }
+ // Set this to make sure the items underneath also get renamed
+ if options.Name != "" {
+ relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ log.Debugf("Can't add file %s to tar: %s", srcPath, err)
+ }
+ return nil
+ })
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ log.Debugf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ log.Debugf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ log.Debugf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/"
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.Excludes {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ if !strings.HasSuffix(hdr.Name, "/") {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = os.MkdirAll(parentPath, 0777)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, "..") {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end; otherwise, creating
+ // files inside them would modify the directory mtime again
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(archive io.Reader, dest string, options *TarOptions) error {
+ if archive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.Excludes == nil {
+ options.Excludes = []string{}
+ }
+ decompressedArchive, err := DecompressStream(archive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ return Unpack(decompressedArchive, dest, options)
+}
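+
+// For illustration, unpacking a possibly-compressed archive from disk
+// (paths are illustrative):
+//
+//   f, err := os.Open("/tmp/app.tar.gz")
+//   if err == nil {
+//     defer f.Close()
+//     err = Untar(f, "/tmp/extracted", nil)
+//   }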
+
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ log.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ return archiver.Untar(archive, dst, nil)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return defaultArchiver.TarUntar(src, dst)
+}
+
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ if err := archiver.Untar(archive, dst, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return defaultArchiver.UntarPath(src, dst)
+}
+
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+ // Create dst, copy src's content into it
+ log.Debugf("Creating dest directory: %s", dst)
+ if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ log.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return defaultArchiver.CopyWithTar(src, dst)
+}
+
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+ // If dst ends with a trailing /, copy src into it under its base name
+ if dst[len(dst)-1] == '/' {
+ dst = path.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; err != nil {
+ err = er
+ }
+ }()
+ return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// If `dst` ends with a trailing slash '/', the final destination path
+// will be `dst/base(src)`.
+func CopyFileWithTar(src, dst string) (err error) {
+ return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ if input != nil {
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ // Write stdin if any
+ go func() {
+ io.Copy(stdin, input)
+ stdin.Close()
+ }()
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+ pipeR, pipeW := io.Pipe()
+ errChan := make(chan []byte)
+ // Collect stderr, we will use it in case of an error
+ go func() {
+ errText, e := ioutil.ReadAll(stderr)
+ if e != nil {
+ errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
+ }
+ errChan <- errText
+ }()
+ // Copy stdout to the returned pipe
+ go func() {
+ _, err := io.Copy(pipeW, stdout)
+ if err != nil {
+ pipeW.CloseWithError(err)
+ }
+ errText := <-errChan
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
+ } else {
+ pipeW.Close()
+ }
+ }()
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+ return pipeR, nil
+}
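+
+// For illustration, streaming the output of an external decompressor, as
+// xzDecompress above does:
+//
+//   out, err := CmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
+//   if err == nil {
+//     defer out.Close()
+//     // io.Copy fails with the command's stderr if xz exits non-zero
+//     io.Copy(dst, out)
+//   }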
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if err = f.Sync(); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
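+
+// For illustration, buffering an archive to disk so its size is known before
+// it is re-read once (src and dst are illustrative):
+//
+//   tmp, err := NewTempArchive(src, "")
+//   if err == nil {
+//     log.Debugf("archive is %d bytes", tmp.Size)
+//     io.Copy(dst, tmp) // the backing file is removed after the last byte
+//   }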
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
new file mode 100644
index 00000000000..fdba6fb87cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
@@ -0,0 +1,625 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+ out, err := CmdStream(cmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ errCh := make(chan error)
+ go func() {
+ _, err := io.Copy(ioutil.Discard, out)
+ errCh <- err
+ }()
+ select {
+ case err := <-errCh:
+ if err != nil {
+ t.Fatalf("Command should not have failed (err=%.100s...)", err)
+ }
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+ }
+}
+
+func TestCmdStreamBad(t *testing.T) {
+ badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+ out, err := CmdStream(badCmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ if output, err := ioutil.ReadAll(out); err == nil {
+ t.Fatalf("Command should have failed")
+ } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+ t.Fatalf("Wrong error value (%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestCmdStreamGood(t *testing.T) {
+ cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
+ out, err := CmdStream(cmd, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if output, err := ioutil.ReadAll(out); err != nil {
+ t.Fatalf("Command should not have failed (err=%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestTarFiles(t *testing.T) {
+ // try without hardlinks
+ if err := checkNoChanges(1000, false); err != nil {
+ t.Fatal(err)
+ }
+ // try with hardlinks
+ if err := checkNoChanges(1000, true); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func checkNoChanges(fileNum int, hardlinks bool) error {
+ srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(srcDir)
+
+ destDir, err := ioutil.TempDir("", "docker-test-destDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(destDir)
+
+ _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
+ if err != nil {
+ return err
+ }
+
+ err = TarUntar(srcDir, destDir)
+ if err != nil {
+ return err
+ }
+
+ changes, err := ChangesDirs(destDir, srcDir)
+ if err != nil {
+ return err
+ }
+ if len(changes) > 0 {
+ return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
+ }
+ return nil
+}
+
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+ archive, err := TarWithOptions(origin, options)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer archive.Close()
+
+ buf := make([]byte, 10)
+ if _, err := archive.Read(buf); err != nil {
+ return nil, err
+ }
+ wrap := io.MultiReader(bytes.NewReader(buf), archive)
+
+ detectedCompression := DetectCompression(buf)
+ compression := options.Compression
+ if detectedCompression.Extension() != compression.Extension() {
+ return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
+ }
+
+ tmp, err := ioutil.TempDir("", "docker-test-untar")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(tmp)
+ if err := Untar(wrap, tmp, nil); err != nil {
+ return nil, err
+ }
+ if _, err := os.Stat(tmp); err != nil {
+ return nil, err
+ }
+
+ return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, c := range []Compression{
+ Uncompressed,
+ Gzip,
+ } {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ Excludes: []string{"3"},
+ })
+
+ if err != nil {
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+ }
+
+ if len(changes) != 1 || changes[0].Path != "/3" {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ }
+}
+
+func TestTarWithOptions(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ opts *TarOptions
+ numChanges int
+ }{
+ {&TarOptions{Includes: []string{"1"}}, 1},
+ {&TarOptions{Excludes: []string{"2"}}, 1},
+ }
+ for _, testCase := range cases {
+ changes, err := tarUntar(t, origin, testCase.opts)
+ if err != nil {
+ t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+ }
+ if len(changes) != testCase.numChanges {
+ t.Errorf("Expected %d changes, got %d for %+v:",
+ testCase.numChanges, len(changes), testCase.opts)
+ }
+ }
+}
+
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
+// use PAX Global Extended Headers.
+ // If handling them fails, such archives cannot be unpacked during ADD.
+func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
+ hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
+ tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+ err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Some tar archives have both GNU-specific (huge uid) and Ustar-specific (long name) entries.
+// That's not supposed to happen (PAX should be used instead of Ustar for long names), but it does, and it should still work.
+func TestUntarUstarGnuConflict(t *testing.T) {
+ f, err := os.Open("testdata/broken.tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ found := false
+ tr := tar.NewReader(f)
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
+ }
+}
+
+func TestTarWithHardLink(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ var i1, i2 uint64
+ if i1, err = getNlink(path.Join(origin, "1")); err != nil {
+ t.Fatal(err)
+ }
+ // sanity check that we can hardlink
+ if i1 != 2 {
+ t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+ }
+
+ dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+
+ // we'll do this in two steps to separate failure
+ fh, err := Tar(origin, Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure we can read the whole thing with no error, before writing back out
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bRdr := bytes.NewReader(buf)
+ err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if i1, err = getInode(path.Join(dest, "1")); err != nil {
+ t.Fatal(err)
+ }
+ if i2, err = getInode(path.Join(dest, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ if i1 != i2 {
+ t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+ }
+}
+
+func getNlink(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys())
+ }
+ return statT.Nlink, nil
+}
+
+func getInode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys())
+ }
+ return statT.Ino, nil
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := path.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := path.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, true)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidHardlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidSymlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try writing to victim/newdir/newfile with a symlink in the path
+ {
+ // this header needs to be before the next one, or else there is an error
+ Name: "dir/loophole",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "dir/loophole/newdir/newfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestTempArchiveCloseMultipleTimes(t *testing.T) {
+ reader := ioutil.NopCloser(strings.NewReader("hello"))
+ tempArchive, err := NewTempArchive(reader, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := make([]byte, 10)
+ n, err := tempArchive.Read(buf)
+ if n != 5 {
+ t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
+ }
+ for i := 0; i < 3; i++ {
+ if err = tempArchive.Close(); err != nil {
+ t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 00000000000..c0e8aee93cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,39 @@
+// +build !windows
+
+package archive
+
+import (
+ "errors"
+ "syscall"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ err = errors.New("cannot convert stat value to syscall.Stat_t")
+ return
+ }
+
+ nlink = uint32(s.Nlink)
+ inode = uint64(s.Ino)
+
+ // Currently Go does not fill in the major/minor device numbers
+ if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
+ s.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
+ hdr.Devmajor = int64(major(uint64(s.Rdev)))
+ hdr.Devminor = int64(minor(uint64(s.Rdev)))
+ }
+
+ return
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
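+
+// For example (illustrative only, following the traditional Linux dev_t
+// layout decoded above): a raw device number of 0x0801 yields
+//
+//	major(0x0801) == 8 // block device class, e.g. sd*
+//	minor(0x0801) == 1 // first partition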
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 00000000000..3cc2493f6f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package archive
+
+import (
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 00000000000..85217f6e088
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,413 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+type ChangeType int
+
+const (
+ ChangeModify = iota
+ ChangeAdd
+ ChangeDelete
+)
+
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
+
+// GNU tar and the Go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle this
+// by comparing for exact times, *or* the same second count with either a or
+// b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
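+
+// For example (illustrative): under this rule 12:00:00.000000000 compares
+// equal to 12:00:00.500000000 (same second, one side has zero nanoseconds),
+// while 12:00:00.100000000 and 12:00:00.200000000 do not.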
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ var changes []Change
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+ path = filepath.Join("/", path)
+
+ // Skip root
+ if path == "/" {
+ return nil
+ }
+
+ // Skip AUFS metadata
+ if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
+ return err
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ // Find out what kind of modification happened
+ file := filepath.Base(path)
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(file, ".wh.") {
+ originalFile := file[len(".wh."):]
+ change.Path = filepath.Join(filepath.Dir(path), originalFile)
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
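+
+// For example (illustrative): a whiteout file ".wh.foo" in the rw layer is
+// reported as "D /foo", a brand new file as "A /newfile", and a file that
+// also exists in a parent layer as "C /file".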
+
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.Stat
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+func (root *FileInfo) LookUp(path string) *FileInfo {
+ parent := root
+ if path == "/" {
+ return root
+ }
+
+ pathElements := strings.Split(path, "/")
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ return "/"
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.Uid() != newStat.Uid() ||
+ oldStat.Gid() != newStat.Gid() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs, it's not a good measure of change
+ (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) ||
+ !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ root := &FileInfo{
+ name: "/",
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+ relPath = filepath.Join("/", relPath)
+
+ if relPath == "/" {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ if oldDir != "" {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ }
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, err
+ }
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
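+
+// For example (usage sketch; the two paths are hypothetical):
+//
+//	changes, err := ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, c := range changes {
+//		fmt.Println(c.String()) // e.g. "A /filenew", "D /file1"
+//	}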
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var size int64
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, _ := os.Lstat(file)
+ if fileInfo != nil && !fileInfo.IsDir() {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change) (Archive, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ log.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ log.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ log.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ log.Debugf("failed to close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
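+
+// For example (usage sketch chaining ChangesDirs and ExportChanges; error
+// handling elided, paths hypothetical):
+//
+//	changes, _ := ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
+//	archive, _ := ExportChanges("/tmp/layer-new", changes)
+//	defer archive.Close()
+//	io.Copy(os.Stdout, archive) // stream the layer tar, whiteouts included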
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
new file mode 100644
index 00000000000..34c0f0da646
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
@@ -0,0 +1,301 @@
+package archive
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "sort"
+ "testing"
+ "time"
+)
+
+func max(x, y int) int {
+ if x >= y {
+ return x
+ }
+ return y
+}
+
+func copyDir(src, dst string) error {
+ cmd := exec.Command("cp", "-a", src, dst)
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Helper to sort []Change by path
+type byPath struct{ changes []Change }
+
+func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path }
+func (b byPath) Len() int { return len(b.changes) }
+func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] }
+
+type FileType uint32
+
+const (
+ Regular FileType = iota
+ Dir
+ Symlink
+)
+
+type FileData struct {
+ filetype FileType
+ path string
+ contents string
+ permissions os.FileMode
+}
+
+func createSampleDir(t *testing.T, root string) {
+ files := []FileData{
+ {Regular, "file1", "file1\n", 0600},
+ {Regular, "file2", "file2\n", 0666},
+ {Regular, "file3", "file3\n", 0404},
+ {Regular, "file4", "file4\n", 0600},
+ {Regular, "file5", "file5\n", 0600},
+ {Regular, "file6", "file6\n", 0600},
+ {Regular, "file7", "file7\n", 0600},
+ {Dir, "dir1", "", 0740},
+ {Regular, "dir1/file1-1", "file1-1\n", 01444},
+ {Regular, "dir1/file1-2", "file1-2\n", 0666},
+ {Dir, "dir2", "", 0700},
+ {Regular, "dir2/file2-1", "file2-1\n", 0666},
+ {Regular, "dir2/file2-2", "file2-2\n", 0666},
+ {Dir, "dir3", "", 0700},
+ {Regular, "dir3/file3-1", "file3-1\n", 0666},
+ {Regular, "dir3/file3-2", "file3-2\n", 0666},
+ {Dir, "dir4", "", 0700},
+ {Regular, "dir4/file3-1", "file4-1\n", 0666},
+ {Regular, "dir4/file3-2", "file4-2\n", 0666},
+ {Symlink, "symlink1", "target1", 0666},
+ {Symlink, "symlink2", "target2", 0666},
+ }
+
+ now := time.Now()
+ for _, info := range files {
+ p := path.Join(root, info.path)
+ if info.filetype == Dir {
+ if err := os.MkdirAll(p, info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Regular {
+ if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Symlink {
+ if err := os.Symlink(info.contents, p); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if info.filetype != Symlink {
+ // Set a consistent ctime, atime for all files and dirs
+ if err := os.Chtimes(p, now, now); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+// Create a directory, copy it, and make sure we report no changes between the two
+func TestChangesDirsEmpty(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes) != 0 {
+ t.Fatalf("Reported changes for identical dirs: %v", changes)
+ }
+ os.RemoveAll(src)
+ os.RemoveAll(dst)
+}
+
+func mutateSampleDir(t *testing.T, root string) {
+ // Remove a regular file
+ if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a directory
+ if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Rewrite a file
+ if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace a file
+ if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch file
+ if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace file with dir
+ if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new file
+ if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new dir
+ if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a new symlink
+ if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Change a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace dir with file
+ if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch dir
+ if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChangesDirsMutated(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ defer os.RemoveAll(dst)
+
+ mutateSampleDir(t, dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sort.Sort(byPath{changes})
+
+ expectedChanges := []Change{
+ {"/dir1", ChangeDelete},
+ {"/dir2", ChangeModify},
+ {"/dir3", ChangeModify},
+ {"/dirnew", ChangeAdd},
+ {"/file1", ChangeDelete},
+ {"/file2", ChangeModify},
+ {"/file3", ChangeModify},
+ {"/file4", ChangeModify},
+ {"/file5", ChangeModify},
+ {"/filenew", ChangeAdd},
+ {"/symlink1", ChangeDelete},
+ {"/symlink2", ChangeModify},
+ {"/symlinknew", ChangeAdd},
+ }
+
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+ if i >= len(expectedChanges) {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ }
+ if i >= len(changes) {
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+ }
+ if changes[i].Path == expectedChanges[i].Path {
+ if changes[i] != expectedChanges[i] {
+ t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
+ }
+ } else if changes[i].Path < expectedChanges[i].Path {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ } else {
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+ }
+ }
+}
+
+func TestApplyLayer(t *testing.T) {
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ defer os.RemoveAll(src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ mutateSampleDir(t, dst)
+ defer os.RemoveAll(dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layer, err := ExportChanges(dst, changes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layerCopy, err := NewTempArchive(layer, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ApplyLayer(src, layerCopy); err != nil {
+ t.Fatal(err)
+ }
+
+ changes2, err := ChangesDirs(src, dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes2) != 0 {
+ t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 00000000000..ba22c41f3cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,165 @@
+package archive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+func UnpackLayer(dest string, layer ArchiveReader) error {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ if !strings.HasSuffix(hdr.Name, "/") {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = os.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+ // We don't want this directory, but we need the files in it so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, "..") {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, ".wh.") {
+ originalBase := base[len(".wh."):]
+ originalPath := filepath.Join(filepath.Dir(path), originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return err
+ }
+ } else {
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these hardlinks to the temporary files they were extracted into.
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`.
+func ApplyLayer(dest string, layer ArchiveReader) error {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return err
+ }
+ return UnpackLayer(dest, layer)
+}
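+
+// For example (usage sketch; `layer` is a hypothetical ArchiveReader, such
+// as the stream produced by ExportChanges):
+//
+//	if err := ApplyLayer("/tmp/rootfs", layer); err != nil {
+//		// handle error
+//	}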
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
new file mode 100644
index 00000000000..758c4115d57
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
@@ -0,0 +1,191 @@
+package archive
+
+import (
+ "testing"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func TestApplyLayerInvalidFilenames(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidHardlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidSymlink(t *testing.T) {
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 00000000000..cedd46a408e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/archive"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ var err error
+ // assign to the outer oldDir; `:=` here would shadow it and leave it empty
+ oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar
new file mode 100644
index 00000000000..8f10ea6b87d
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 00000000000..3448569b1eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 00000000000..e85aac05408
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
new file mode 100644
index 00000000000..3624fe5afa8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
@@ -0,0 +1,166 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+var testUntarFns = map[string]func(string, io.Reader) error{
+ "untar": func(dest string, r io.Reader) error {
+ return Untar(r, dest, nil)
+ },
+ "applylayer": func(dest string, r io.Reader) error {
+ return ApplyLayer(dest, ArchiveReader(r))
+ },
+}
+
+// testBreakout is a helper function that, within the provided `tmpdir` directory,
+// creates a `victim` folder with a generated `hello` file in it.
+// The untar function selected by `untarFn` extracts the tar file created
+// from `headers` into a directory named `dest`.
+//
+// Here are the tested scenarios:
+// - removed `victim` folder (write)
+// - removed files from `victim` folder (write)
+// - new files in `victim` folder (write)
+// - modified files in `victim` folder (write)
+// - file in `dest` with same content as `victim/hello` (read)
+//
+// When using testBreakout make sure you cover one of the scenarios listed above.
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
+ tmpdir, err := ioutil.TempDir("", tmpdir)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpdir)
+
+ dest := filepath.Join(tmpdir, "dest")
+ if err := os.Mkdir(dest, 0755); err != nil {
+ return err
+ }
+
+ victim := filepath.Join(tmpdir, "victim")
+ if err := os.Mkdir(victim, 0755); err != nil {
+ return err
+ }
+ hello := filepath.Join(victim, "hello")
+ helloData, err := time.Now().MarshalText()
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
+ return err
+ }
+ helloStat, err := os.Stat(hello)
+ if err != nil {
+ return err
+ }
+
+ reader, writer := io.Pipe()
+ go func() {
+ t := tar.NewWriter(writer)
+ for _, hdr := range headers {
+ t.WriteHeader(hdr)
+ }
+ t.Close()
+ }()
+
+ untar := testUntarFns[untarFn]
+ if untar == nil {
+ return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
+ }
+ if err := untar(dest, reader); err != nil {
+ if _, ok := err.(breakoutError); !ok {
+ // If untar returns an error unrelated to an archive breakout,
+ // then consider this an unexpected error and abort.
+ return err
+ }
+ // Here, untar detected the breakout.
+ // Let's move on verifying that indeed there was no breakout.
+ fmt.Printf("breakoutError: %v\n", err)
+ }
+
+ // Check victim folder
+ f, err := os.Open(victim)
+ if err != nil {
+ // codepath taken if victim folder was removed
+ return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
+ }
+ defer f.Close()
+
+ // Check contents of victim folder
+ //
+ // We are only interested in getting 2 files from the victim folder, because if all is well
+ // we expect only one result, the `hello` file. If there is a second result, it cannot
+ // also be named `hello`, so we assume that a new file got created in the victim folder.
+ // That is enough to detect an archive breakout.
+ names, err := f.Readdirnames(2)
+ if err != nil {
+ // codepath taken if victim is not a folder
+ return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
+ }
+ for _, name := range names {
+ if name != "hello" {
+ // codepath taken if new file was created in victim folder
+ return fmt.Errorf("archive breakout: new file %q", name)
+ }
+ }
+
+ // Check victim/hello
+ f, err = os.Open(hello)
+ if err != nil {
+ // codepath taken if read permissions were removed
+ return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
+ }
+ defer f.Close()
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ if helloStat.IsDir() != fi.IsDir() ||
+ // TODO: cannot check for fi.ModTime() change
+ helloStat.Mode() != fi.Mode() ||
+ helloStat.Size() != fi.Size() ||
+ !bytes.Equal(helloData, b) {
+ // codepath taken if hello has been modified
+ return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
+ }
+
+ // Check that nothing in dest/ has the same content as victim/hello.
+ // Since victim/hello was generated with time.Now(), it is safe to assume
+ // that any file whose content matches exactly victim/hello, managed somehow
+ // to access victim/hello.
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ if err != nil {
+ // skip directory if error
+ return filepath.SkipDir
+ }
+ // enter directory
+ return nil
+ }
+ if err != nil {
+ // skip file if error
+ return nil
+ }
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ // Houston, we have a problem. Aborting (space)walk.
+ return err
+ }
+ if bytes.Equal(helloData, b) {
+ return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
+ }
+ return nil
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 00000000000..b8b60197a3d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "bytes"
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+ "io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return ioutil.NopCloser(buf), nil
+}
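+
+// For example (usage sketch):
+//
+//	archive, err := Generate("foo.txt", "hello world", "emptyfile")
+//	if err != nil {
+//		// handle error
+//	}
+//	io.Copy(os.Stdout, archive) // raw tar stream containing the two files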
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 00000000000..4e4a91b91ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,26 @@
+package fileutils
+
+import (
+ log "github.com/Sirupsen/logrus"
+ "path/filepath"
+)
+
+// Matches returns true if relFilePath matches any of the patterns
+func Matches(relFilePath string, patterns []string) (bool, error) {
+ for _, exclude := range patterns {
+ matched, err := filepath.Match(exclude, relFilePath)
+ if err != nil {
+ log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
+ return false, err
+ }
+ if matched {
+ if filepath.Clean(relFilePath) == "." {
+ log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
+ continue
+ }
+ log.Debugf("Skipping excluded path: %s", relFilePath)
+ return true, nil
+ }
+ }
+ return false, nil
+}
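+
+// For example (usage sketch, with hypothetical .dockerignore-style patterns):
+//
+//	skip, err := Matches("docs/notes.md", []string{"docs/*", "*.log"})
+//	// skip == true, err == nil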
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 00000000000..22f46fbd92e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,114 @@
+package ioutils
+
+import (
+ "bytes"
+ "io"
+ "sync"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+type bufReader struct {
+ sync.Mutex
+ buf *bytes.Buffer
+ reader io.Reader
+ err error
+ wait sync.Cond
+ drainBuf []byte
+}
+
+func NewBufReader(r io.Reader) *bufReader {
+ reader := &bufReader{
+ buf: &bytes.Buffer{},
+ drainBuf: make([]byte, 1024),
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
+ reader := &bufReader{
+ buf: buffer,
+ drainBuf: drainBuffer,
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func (r *bufReader) drain() {
+ for {
+ n, err := r.reader.Read(r.drainBuf)
+ r.Lock()
+ if err != nil {
+ r.err = err
+ } else {
+ r.buf.Write(r.drainBuf[0:n])
+ }
+ r.wait.Signal()
+ r.Unlock()
+ if err != nil {
+ break
+ }
+ }
+}
+
+func (r *bufReader) Read(p []byte) (n int, err error) {
+ r.Lock()
+ defer r.Unlock()
+ for {
+ n, err = r.buf.Read(p)
+ if n > 0 {
+ return n, err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ r.wait.Wait()
+ }
+}
+
+func (r *bufReader) Close() error {
+ closer, ok := r.reader.(io.ReadCloser)
+ if !ok {
+ return nil
+ }
+ return closer.Close()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
new file mode 100644
index 00000000000..a7a2dad176d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
@@ -0,0 +1,34 @@
+package ioutils
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestBufReader(t *testing.T) {
+ reader, writer := io.Pipe()
+ bufreader := NewBufReader(reader)
+
+ // Write everything to the pipe. Writes to a pipe would usually block,
+ // but because of the buffered reader they go through.
+ done := make(chan bool)
+ go func() {
+ writer.Write([]byte("hello world"))
+ writer.Close()
+ done <- true
+ }()
+
+ // Drain the reader *after* everything has been written, just to verify
+ // it is indeed buffering
+ <-done
+ output, err := ioutil.ReadAll(bufreader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(output, []byte("hello world")) {
+ t.Error(string(output))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 00000000000..c0b3608fe6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,39 @@
+package ioutils
+
+import "io"
+
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+type NopFlusher struct{}
+
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 00000000000..5338a0cfb25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,111 @@
+// +build go1.3
+
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ // Pool which returns bufio.Reader with a 32K buffer
+ BufioReader32KPool *BufioReaderPool
+ // Pool which returns bufio.Writer with a 32K buffer
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
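+
+// For example (usage sketch; `src` is a hypothetical io.Reader):
+//
+//	buf := BufioReader32KPool.Get(src)
+//	defer BufioReader32KPool.Put(buf)
+//	line, err := buf.ReadString('\n') // reads go through the pooled 32K buffer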
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go
new file mode 100644
index 00000000000..48903c2396a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go
@@ -0,0 +1,73 @@
+// +build !go1.3
+
+package pools
+
+import (
+ "bufio"
+ "io"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ BufioReader32KPool *BufioReaderPool
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+type BufioReaderPool struct {
+ size int
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{size: size}
+}
+
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ return bufio.NewReaderSize(r, bufPool.size)
+}
+
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+}
+
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ return readCloser.Close()
+ }
+ return nil
+ })
+}
+
+type BufioWriterPool struct {
+ size int
+}
+
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{size: size}
+}
+
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ return bufio.NewWriterSize(w, bufPool.size)
+}
+
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+}
+
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ return writeCloser.Close()
+ }
+ return nil
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 00000000000..dd52b9082f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a function call in a goroutine
+// and returns a channel which will later carry the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
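+
+// For example (usage sketch; doWork is a hypothetical func() error):
+//
+//	errCh := Go(doWork)
+//	// ... do other work concurrently ...
+//	if err := <-errCh; err != nil {
+//		// handle error
+//	}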
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS
new file mode 100644
index 00000000000..68a97d2fc24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS
@@ -0,0 +1,2 @@
+Michael Crosby (@crosbymichael)
+Victor Vieux (@vieux)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 00000000000..63045186fe8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,9 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotSupportedPlatform = errors.New("platform and architecture are not supported")
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 00000000000..9ef82d55237
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,16 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func Lstat(path string) (*Stat, error) {
+ s := &syscall.Stat_t{}
+ err := syscall.Lstat(path, s)
+ if err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
new file mode 100644
index 00000000000..9bab4d7b0ce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "os"
+ "testing"
+)
+
+func TestLstat(t *testing.T) {
+ file, invalid, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ statFile, err := Lstat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if statFile == nil {
+ t.Fatal("returned empty stat for existing file")
+ }
+
+ statInvalid, err := Lstat(invalid)
+ if err == nil {
+ t.Fatal("did not return error for non-existing file")
+ }
+ if statInvalid != nil {
+ t.Fatal("returned non-nil stat for non-existing file")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 00000000000..213a7c7ade3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package system
+
+func Lstat(path string) (*Stat, error) {
+ // should not be called on cli code path
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 00000000000..3b6e947e675
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 00000000000..b7de3ff7765
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,67 @@
+package system
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/pkg/units"
+)
+
+var (
+ ErrMalformed = errors.New("malformed file")
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and parses
+// them into a MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
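+
+// Worked example (illustrative): the line "MemTotal:  2048 kB" splits into
+// ["MemTotal:", "2048", "kB"], and 2048 is multiplied by units.KiB (1024),
+// so MemTotal becomes 2097152 bytes. /proc/meminfo's "kB" values are in
+// fact kibibytes, which is why units.KiB is the right multiplier.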
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
new file mode 100644
index 00000000000..377405ea69a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
@@ -0,0 +1,37 @@
+package system
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/units"
+)
+
+func TestMemInfo(t *testing.T) {
+ const input = `
+ MemTotal: 1 kB
+ MemFree: 2 kB
+ SwapTotal: 3 kB
+ SwapFree: 4 kB
+ Malformed1:
+ Malformed2: 1
+ Malformed3: 2 MB
+ Malformed4: X kB
+ `
+ meminfo, err := parseMemInfo(strings.NewReader(input))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if meminfo.MemTotal != 1*units.KiB {
+ t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
+ }
+ if meminfo.MemFree != 2*units.KiB {
+ t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
+ }
+ if meminfo.SwapTotal != 3*units.KiB {
+ t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
+ }
+ if meminfo.SwapFree != 4*units.KiB {
+ t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 00000000000..63b8b16e05a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package system
+
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 00000000000..06f9c6afbb8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,18 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Mkdev packs major and minor device numbers into a Linux dev_t. The layout
+// is odd for backwards compatibility with 16-bit device numbers: from low to
+// high, the lower 8 bits of the minor, then 12 bits of the major, then the
+// top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
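+
+// Worked example (illustrative): /dev/sda1 is major 8, minor 1, so
+// Mkdev(8, 1) = (0 << 12) | (8 << 8) | 1 = 0x801 = 2049, the traditional
+// 16-bit dev_t encoding of 8:1.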
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 00000000000..b4020c11b64
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package system
+
+func Mknod(path string, mode uint32, dev int) error {
+ // should not be called on cli code path
+ return ErrNotSupportedPlatform
+}
+
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on windows, should not be called on cli code")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 00000000000..5d47494d212
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,42 @@
+package system
+
+import (
+ "syscall"
+)
+
+type Stat struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+func (s Stat) Mode() uint32 {
+ return s.mode
+}
+
+func (s Stat) Uid() uint32 {
+ return s.uid
+}
+
+func (s Stat) Gid() uint32 {
+ return s.gid
+}
+
+func (s Stat) Rdev() uint64 {
+ return s.rdev
+}
+
+func (s Stat) Size() int64 {
+ return s.size
+}
+
+func (s Stat) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+func (s Stat) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 00000000000..47cebef5cf1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,14 @@
+package system
+
+import (
+ "syscall"
+)
+
+func fromStatT(s *syscall.Stat_t) (*Stat, error) {
+ return &Stat{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
new file mode 100644
index 00000000000..abcc8ea7a60
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
@@ -0,0 +1,36 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "testing"
+)
+
+func TestFromStatT(t *testing.T) {
+ file, _, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ stat := &syscall.Stat_t{}
+ if err := syscall.Lstat(file, stat); err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := fromStatT(stat)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stat.Mode != s.Mode() {
+ t.Fatal("got invalid mode")
+ }
+ if stat.Uid != s.Uid() {
+ t.Fatal("got invalid uid")
+ }
+ if stat.Gid != s.Gid() {
+ t.Fatal("got invalid gid")
+ }
+ if stat.Rdev != s.Rdev() {
+ t.Fatal("got invalid rdev")
+ }
+ if stat.Mtim != s.Mtim() {
+ t.Fatal("got invalid mtim")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 00000000000..c4d53e6cd61
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux,!windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func fromStatT(s *syscall.Stat_t) (*Stat, error) {
+ return &Stat{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 00000000000..584e8940ccf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package system
+
+import (
+ "errors"
+ "syscall"
+)
+
+func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) {
+ return nil, errors.New("fromStatT should not be called on windows path")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 00000000000..fddbecd3903
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 00000000000..3be563f89e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package system
+
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 00000000000..4c6002fe8e2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,11 @@
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 00000000000..ceaa044c1c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 00000000000..8f90298271e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,28 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ AT_FDCWD := -100
+ AT_SYMLINK_NOFOLLOW := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
new file mode 100644
index 00000000000..1dea47cc15d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+)
+
+func prepareFiles(t *testing.T) (string, string, string, string) {
+ dir, err := ioutil.TempDir("", "docker-system-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file := filepath.Join(dir, "exist")
+ if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ invalid := filepath.Join(dir, "doesnt-exist")
+
+ symlink := filepath.Join(dir, "symlink")
+ if err := os.Symlink(file, symlink); err != nil {
+ t.Fatal(err)
+ }
+
+ return file, invalid, symlink, dir
+}
+
+func TestLUtimesNano(t *testing.T) {
+ file, invalid, symlink, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ before, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []syscall.Timespec{{0, 0}, {0, 0}}
+ if err := LUtimesNano(symlink, ts); err != nil {
+ t.Fatal(err)
+ }
+
+ symlinkInfo, err := os.Lstat(symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the symlink should be different")
+ }
+
+ fileInfo, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the file should be the same")
+ }
+
+ if err := LUtimesNano(invalid, ts); err == nil {
+ t.Fatal("Expected an error for a non-existing file")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 00000000000..adf2734f277
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 00000000000..00edb201b58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Lgetxattr retrieves the extended attribute attr of path (without following
+// symlinks). It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the extended attribute attr of path to data (without
+// following symlinks).
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
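+
+// Usage sketch (illustrative; the attribute name is just an example):
+//
+//   val, err := Lgetxattr("/some/path", "security.capability")
+//   // val == nil && err == nil means the attribute is simply not set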
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 00000000000..0060c167dc2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package system
+
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS
new file mode 100644
index 00000000000..96abeae5700
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS
@@ -0,0 +1,2 @@
+Victor Vieux (@vieux)
+Jessie Frazelle (@jfrazelle)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
new file mode 100644
index 00000000000..cd331214966
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
@@ -0,0 +1,31 @@
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (e.g. "Less than a second", "About a minute", "4 hours").
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%f years", d.Hours()/24/365)
+}
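+
+// Worked examples (illustrative): 90*time.Minute truncates to hours == 1,
+// giving "About an hour"; 3 days is 72 hours, which is below the two-week
+// cutoff of 336 hours, so the result is "3 days" (72/24).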
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
new file mode 100644
index 00000000000..a22947402b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
@@ -0,0 +1,46 @@
+package units
+
+import (
+ "testing"
+ "time"
+)
+
+func TestHumanDuration(t *testing.T) {
+ // Useful duration abstractions
+ day := 24 * time.Hour
+ week := 7 * day
+ month := 30 * day
+ year := 365 * day
+
+ assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond))
+ assertEquals(t, "47 seconds", HumanDuration(47*time.Second))
+ assertEquals(t, "About a minute", HumanDuration(1*time.Minute))
+ assertEquals(t, "3 minutes", HumanDuration(3*time.Minute))
+ assertEquals(t, "35 minutes", HumanDuration(35*time.Minute))
+ assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
+ assertEquals(t, "About an hour", HumanDuration(1*time.Hour))
+ assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute))
+ assertEquals(t, "3 hours", HumanDuration(3*time.Hour))
+ assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute))
+ assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
+ assertEquals(t, "24 hours", HumanDuration(24*time.Hour))
+ assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour))
+ assertEquals(t, "2 days", HumanDuration(2*day))
+ assertEquals(t, "7 days", HumanDuration(7*day))
+ assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour))
+ assertEquals(t, "2 weeks", HumanDuration(2*week))
+ assertEquals(t, "2 weeks", HumanDuration(2*week+4*day))
+ assertEquals(t, "3 weeks", HumanDuration(3*week))
+ assertEquals(t, "4 weeks", HumanDuration(4*week))
+ assertEquals(t, "4 weeks", HumanDuration(4*week+3*day))
+ assertEquals(t, "4 weeks", HumanDuration(1*month))
+ assertEquals(t, "6 weeks", HumanDuration(1*month+2*week))
+ assertEquals(t, "8 weeks", HumanDuration(2*month))
+ assertEquals(t, "3 months", HumanDuration(3*month+1*week))
+ assertEquals(t, "5 months", HumanDuration(5*month+2*week))
+ assertEquals(t, "13 months", HumanDuration(13*month))
+ assertEquals(t, "23 months", HumanDuration(23*month))
+ assertEquals(t, "24 months", HumanDuration(24*month))
+ assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
+ assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
new file mode 100644
index 00000000000..264f388225e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
@@ -0,0 +1,91 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// HumanSize returns a human-readable approximation of a size
+// using SI (decimal) units (e.g. "44 kB", "17 MB").
+func HumanSize(size int64) string {
+ return intToString(float64(size), 1000.0, decimalAbbrs)
+}
+
+// BytesSize returns a human-readable approximation of a size in binary
+// (IEC) units (e.g. "2 MiB", "3.42 GiB").
+func BytesSize(size float64) string {
+ return intToString(size, 1024.0, binaryAbbrs)
+}
+
+func intToString(size, unit float64, _map []string) string {
+ i := 0
+ for size >= unit {
+ size = size / unit
+ i++
+ }
+ return fmt.Sprintf("%.4g %s", size, _map[i])
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI (decimal) units (e.g. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses a human-readable size string into the number of bytes
+// it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
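+
+// Worked example (illustrative): for "32Kb" the regex captures ("32", "K");
+// FromHumanSize looks "k" up in decimalMap and returns 32 * 1000 = 32000,
+// while RAMInBytes uses binaryMap and returns 32 * 1024 = 32768. A bare
+// "32" has no unit prefix, so both functions return 32 unchanged.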
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
new file mode 100644
index 00000000000..3e410b0db85
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
@@ -0,0 +1,108 @@
+package units
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestBytesSize(t *testing.T) {
+ assertEquals(t, "1 KiB", BytesSize(1024))
+ assertEquals(t, "1 MiB", BytesSize(1024*1024))
+ assertEquals(t, "1 MiB", BytesSize(1048576))
+ assertEquals(t, "2 MiB", BytesSize(2*MiB))
+ assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB))
+ assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB))
+ assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB))
+}
+
+func TestHumanSize(t *testing.T) {
+ assertEquals(t, "1 kB", HumanSize(1000))
+ assertEquals(t, "1.024 kB", HumanSize(1024))
+ assertEquals(t, "1 MB", HumanSize(1000000))
+ assertEquals(t, "1.049 MB", HumanSize(1048576))
+ assertEquals(t, "2 MB", HumanSize(2*MB))
+ assertEquals(t, "3.42 GB", HumanSize(int64(float64(3.42*GB))))
+ assertEquals(t, "5.372 TB", HumanSize(int64(float64(5.372*TB))))
+ assertEquals(t, "2.22 PB", HumanSize(int64(float64(2.22*PB))))
+}
+
+func TestFromHumanSize(t *testing.T) {
+ assertSuccessEquals(t, 32, FromHumanSize, "32")
+ assertSuccessEquals(t, 32, FromHumanSize, "32b")
+ assertSuccessEquals(t, 32, FromHumanSize, "32B")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32k")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32K")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb")
+ assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
+ assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
+ assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
+ assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
+ assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
+
+ assertError(t, FromHumanSize, "")
+ assertError(t, FromHumanSize, "hello")
+ assertError(t, FromHumanSize, "-32")
+ assertError(t, FromHumanSize, "32.3")
+ assertError(t, FromHumanSize, " 32 ")
+ assertError(t, FromHumanSize, "32.3Kb")
+ assertError(t, FromHumanSize, "32 mb")
+ assertError(t, FromHumanSize, "32m b")
+ assertError(t, FromHumanSize, "32bm")
+}
+
+func TestRAMInBytes(t *testing.T) {
+ assertSuccessEquals(t, 32, RAMInBytes, "32")
+ assertSuccessEquals(t, 32, RAMInBytes, "32b")
+ assertSuccessEquals(t, 32, RAMInBytes, "32B")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
+ assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
+ assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
+ assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
+ assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
+ assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
+
+ assertError(t, RAMInBytes, "")
+ assertError(t, RAMInBytes, "hello")
+ assertError(t, RAMInBytes, "-32")
+ assertError(t, RAMInBytes, "32.3")
+ assertError(t, RAMInBytes, " 32 ")
+ assertError(t, RAMInBytes, "32.3Kb")
+ assertError(t, RAMInBytes, "32 mb")
+ assertError(t, RAMInBytes, "32m b")
+ assertError(t, RAMInBytes, "32bm")
+}
+
+func assertEquals(t *testing.T, expected, actual interface{}) {
+ if expected != actual {
+ t.Errorf("Expected '%v' but got '%v'", expected, actual)
+ }
+}
+
+// parseFn matches the signature of the size-parsing functions under test.
+type parseFn func(string) (int64, error)
+
+// String implements fmt.Stringer so failure messages print the name of
+// the function under test.
+func (fn parseFn) String() string {
+ fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+ return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+ res, err := fn(arg)
+ if err != nil || res != expected {
+ t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
+ }
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+ res, err := fn(arg)
+ if err == nil && res != -1 {
+ t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
new file mode 100644
index 00000000000..e363aa793e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
@@ -0,0 +1,305 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+// http://www.gnu.org/software/tar/manual/html_node/Standard.html
+// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+const (
+ blockSize = 512
+
+ // Types
+ TypeReg = '0' // regular file
+ TypeRegA = '\x00' // regular file
+ TypeLink = '1' // hard link
+ TypeSymlink = '2' // symbolic link
+ TypeChar = '3' // character device node
+ TypeBlock = '4' // block device node
+ TypeDir = '5' // directory
+ TypeFifo = '6' // fifo node
+ TypeCont = '7' // reserved
+ TypeXHeader = 'x' // extended header
+ TypeXGlobalHeader = 'g' // global extended header
+ TypeGNULongName = 'L' // Next file has a long name
+ TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
+ TypeGNUSparse = 'S' // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+ Name string // name of header file entry
+ Mode int64 // permission and mode bits
+ Uid int // user id of owner
+ Gid int // group id of owner
+ Size int64 // length in bytes
+ ModTime time.Time // modified time
+ Typeflag byte // type of header entry
+ Linkname string // target name of link
+ Uname string // user name of owner
+ Gname string // group name of owner
+ Devmajor int64 // major number of character or block device
+ Devminor int64 // minor number of character or block device
+ AccessTime time.Time // access time
+ ChangeTime time.Time // status change time
+ Xattrs map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+ fileNameSize = 100 // Maximum number of bytes in a standard tar name.
+ fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ // setuid
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ // setgid
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ // sticky
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits.
+ // clear perm, setuid, setgid and sticky bits.
+ m := os.FileMode(fi.h.Mode) &^ 07777
+ if m == c_ISDIR {
+ // directory
+ mode |= os.ModeDir
+ }
+ if m == c_ISFIFO {
+ // named pipe (FIFO)
+ mode |= os.ModeNamedPipe
+ }
+ if m == c_ISLNK {
+ // symbolic link
+ mode |= os.ModeSymlink
+ }
+ if m == c_ISBLK {
+ // device file
+ mode |= os.ModeDevice
+ }
+ if m == c_ISCHR {
+ // Unix character device
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ }
+ if m == c_ISSOCK {
+ // Unix domain socket
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeLink, TypeSymlink:
+ // hard link, symbolic link
+ mode |= os.ModeSymlink
+ case TypeChar:
+ // character device node
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ // block device node
+ mode |= os.ModeDevice
+ case TypeDir:
+ // directory
+ mode |= os.ModeDir
+ case TypeFifo:
+ // fifo node
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+ paxAtime = "atime"
+ paxCharset = "charset"
+ paxComment = "comment"
+ paxCtime = "ctime" // please note that ctime is not a valid pax header.
+ paxGid = "gid"
+ paxGname = "gname"
+ paxLinkpath = "linkpath"
+ paxMtime = "mtime"
+ paxPath = "path"
+ paxSize = "size"
+ paxUid = "uid"
+ paxUname = "uname"
+ paxXattr = "SCHILY.xattr."
+ paxNone = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Mode |= c_ISREG
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Mode |= c_ISDIR
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Mode |= c_ISLNK
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Mode |= c_ISCHR
+ h.Typeflag = TypeChar
+ } else {
+ h.Mode |= c_ISBLK
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ h.Mode |= c_ISFIFO
+ case fm&os.ModeSocket != 0:
+ h.Mode |= c_ISSOCK
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+ for i := 0; i < len(header); i++ {
+ if i == 148 {
+ // The chksum field (header[148:156]) is special: it should be treated as space bytes.
+ unsigned += ' ' * 8
+ signed += ' ' * 8
+ i += 7
+ continue
+ }
+ unsigned += int64(header[i])
+ signed += int64(int8(header[i]))
+ }
+ return
+}
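+
+// Worked example (illustrative): the 8-byte chksum field at header[148:156]
+// is counted as if it held spaces (8 * ' ' = 256 is added once and the index
+// jumps past the field), because a tar checksum is defined over the header
+// with its own checksum field blanked to spaces.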
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+ s := *sp
+ b, *sp = s[0:n], s[n:]
+ return
+}
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 {
+ return false
+ }
+ }
+ return true
+}
+
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ var buf bytes.Buffer
+ for _, c := range s {
+ if c < 0x80 {
+ buf.WriteByte(byte(c))
+ }
+ }
+ return buf.String()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
new file mode 100644
index 00000000000..351eaa0e6cc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
@@ -0,0 +1,79 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar_test
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+func Example() {
+ // Create a buffer to write our archive to.
+ buf := new(bytes.Buffer)
+
+ // Create a new tar archive.
+ tw := tar.NewWriter(buf)
+
+ // Add some files to the archive.
+ var files = []struct {
+ Name, Body string
+ }{
+ {"readme.txt", "This archive contains some text files."},
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+ {"todo.txt", "Get animal handling licence."},
+ }
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ log.Fatalln(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ log.Fatalln(err)
+ }
+ }
+ // Make sure to check the error on Close.
+ if err := tw.Close(); err != nil {
+ log.Fatalln(err)
+ }
+
+ // Open the tar archive for reading.
+ r := bytes.NewReader(buf.Bytes())
+ tr := tar.NewReader(r)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("Contents of %s:\n", hdr.Name)
+ if _, err := io.Copy(os.Stdout, tr); err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Println()
+ }
+
+ // Output:
+ // Contents of readme.txt:
+ // This archive contains some text files.
+ // Contents of gopher.txt:
+ // Gopher names:
+ // George
+ // Geoffrey
+ // Gonzo
+ // Contents of todo.txt:
+ // Get animal handling licence.
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
new file mode 100644
index 00000000000..a27559d0f04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
@@ -0,0 +1,820 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - pax extensions
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ err error
+ pad int64 // amount of padding (ignored) after current file entry
+ curr numBytesReader // reader for current file entry
+ hdrBuff [blockSize]byte // buffer to use in readHeader
+}
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+ io.Reader
+ numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+ r io.Reader // underlying reader
+ nb int64 // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
+type sparseFileReader struct {
+ rfr *regFileReader // reads the sparse-encoded file data
+ sp []sparseEntry // the sparse map for the file
+ pos int64 // keeps track of file position
+ tot int64 // total size of the file
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+ oldGNUSparseMainHeaderOffset = 386
+ oldGNUSparseMainHeaderIsExtendedOffset = 482
+ oldGNUSparseMainHeaderNumEntries = 4
+ oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+ oldGNUSparseExtendedHeaderNumEntries = 21
+ oldGNUSparseOffsetSize = 12
+ oldGNUSparseNumBytesSize = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+func (tr *Reader) Next() (*Header, error) {
+ var hdr *Header
+ if tr.err == nil {
+ tr.skipUnread()
+ }
+ if tr.err != nil {
+ return hdr, tr.err
+ }
+ hdr = tr.readHeader()
+ if hdr == nil {
+ return hdr, tr.err
+ }
+ // Check for PAX/GNU header.
+ switch hdr.Typeflag {
+ case TypeXHeader:
+ // PAX extended header
+ headers, err := parsePAX(tr)
+ if err != nil {
+ return nil, err
+ }
+ // We actually read the whole file,
+ // but this skips alignment padding
+ tr.skipUnread()
+ hdr = tr.readHeader()
+ mergePAX(hdr, headers)
+
+ // Check for a PAX format sparse file
+ sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
+ if err != nil {
+ tr.err = err
+ return nil, err
+ }
+ if sp != nil {
+ // Current file is a PAX format GNU sparse file.
+ // Set the current file reader to a sparse file reader.
+ tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
+ }
+ return hdr, nil
+ case TypeGNULongName:
+ // We have a GNU long name header. Its contents are the real file name.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ hdr.Name = cString(realname)
+ return hdr, err
+ case TypeGNULongLink:
+ // We have a GNU long link header.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ hdr.Linkname = cString(realname)
+ return hdr, err
+ }
+ return hdr, tr.err
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+ var sparseFormat string
+
+ // Check for sparse format indicators
+ major, majorOk := headers[paxGNUSparseMajor]
+ minor, minorOk := headers[paxGNUSparseMinor]
+ sparseName, sparseNameOk := headers[paxGNUSparseName]
+ _, sparseMapOk := headers[paxGNUSparseMap]
+ sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+ sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+ // Identify which, if any, sparse format applies from which PAX headers are set
+ if majorOk && minorOk {
+ sparseFormat = major + "." + minor
+ } else if sparseNameOk && sparseMapOk {
+ sparseFormat = "0.1"
+ } else if sparseSizeOk {
+ sparseFormat = "0.0"
+ } else {
+ // Not a PAX format GNU sparse file.
+ return nil, nil
+ }
+
+ // Check for unknown sparse format
+ if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+ return nil, nil
+ }
+
+ // Update hdr from GNU sparse PAX headers
+ if sparseNameOk {
+ hdr.Name = sparseName
+ }
+ if sparseSizeOk {
+ realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ } else if sparseRealSizeOk {
+ realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ }
+
+ // Set up the sparse map, according to the particular sparse format in use
+ var sp []sparseEntry
+ var err error
+ switch sparseFormat {
+ case "0.0", "0.1":
+ sp, err = readGNUSparseMap0x1(headers)
+ case "1.0":
+ sp, err = readGNUSparseMap1x0(tr.curr)
+ }
+ return sp, err
+}
+
+// mergePAX merges well known headers according to the PAX standard.
+// PAX records override the corresponding fields in the header struct,
+// typically carrying higher precision or longer values; this is especially
+// useful for the name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+ for k, v := range headers {
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxUid:
+ uid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = int(uid)
+ case paxGid:
+ gid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = int(gid)
+ case paxAtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.AccessTime = t
+ case paxMtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ModTime = t
+ case paxCtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ChangeTime = t
+ case paxSize:
+ size, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Size = int64(size)
+ default:
+ if strings.HasPrefix(k, paxXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxXattr):]] = v
+ }
+ }
+ }
+ return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
+func parsePAXTime(t string) (time.Time, error) {
+ buf := []byte(t)
+ pos := bytes.IndexByte(buf, '.')
+ var seconds, nanoseconds int64
+ var err error
+ if pos == -1 {
+ seconds, err = strconv.ParseInt(t, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ } else {
+ seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nano_buf := string(buf[pos+1:])
+ // Pad as needed before converting to a decimal.
+ // For example .030 -> .030000000 -> 30000000 nanoseconds
+ if len(nano_buf) < maxNanoSecondIntSize {
+ // Right pad
+ nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
+ } else if len(nano_buf) > maxNanoSecondIntSize {
+ // Right truncate
+ nano_buf = nano_buf[:maxNanoSecondIntSize]
+ }
+ nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ }
+ ts := time.Unix(seconds, nanoseconds)
+ return ts, nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ // For GNU PAX sparse format 0.0 support.
+ // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
+ var sparseMap bytes.Buffer
+
+ headers := make(map[string]string)
+ // Each record is constructed as
+ // "%d %s=%s\n", length, keyword, value
+ for len(buf) > 0 {
+ // or the header was empty to start with.
+ var sp int
+ // The size field ends at the first space.
+ sp = bytes.IndexByte(buf, ' ')
+ if sp == -1 {
+ return nil, ErrHeader
+ }
+ // Parse the first token as a decimal integer.
+ n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ // Extract the record: +1 at the start to skip the ' ' that ends the
+ // length field, -1 at the end to drop the trailing newline.
+ var record []byte
+ record, buf = buf[sp+1:n-1], buf[n:]
+ // The first equals is guaranteed to mark the end of the key.
+ // Everything else is value.
+ eq := bytes.IndexByte(record, '=')
+ if eq == -1 {
+ return nil, ErrHeader
+ }
+ key, value := record[:eq], record[eq+1:]
+
+ keyStr := string(key)
+ if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
+ // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
+ sparseMap.Write(value)
+ sparseMap.Write([]byte{','})
+ } else {
+ // Normal key. Set the value in the headers map.
+ headers[keyStr] = string(value)
+ }
+ }
+ if sparseMap.Len() != 0 {
+ // Add sparse info to headers, chopping off the extra comma
+ sparseMap.Truncate(sparseMap.Len() - 1)
+ headers[paxGNUSparseMap] = sparseMap.String()
+ }
+ return headers, nil
+}
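+
+// Worked example (illustrative): the record "30 mtime=1350244992.023960108\n"
+// is 30 bytes long including its length field and newline; sp lands on the
+// space after "30", record becomes "mtime=1350244992.023960108", and the
+// result map gains headers["mtime"] = "1350244992.023960108".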
+
+// cString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func cString(b []byte) string {
+ n := 0
+ for n < len(b) && b[n] != 0 {
+ n++
+ }
+ return string(b[0:n])
+}
+
+func (tr *Reader) octal(b []byte) int64 {
+ // Check for binary format first.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ var x int64
+ for i, c := range b {
+ if i == 0 {
+ c &= 0x7f // ignore sign bit in first byte
+ }
+ x = x<<8 | int64(c)
+ }
+ return x
+ }
+
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, err := strconv.ParseUint(cString(b), 8, 64)
+ if err != nil {
+ tr.err = err
+ }
+ return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
+func (tr *Reader) skipUnread() {
+ nr := tr.numBytes() + tr.pad // number of bytes to skip
+ tr.curr, tr.pad = nil, 0
+ if sr, ok := tr.r.(io.Seeker); ok {
+ if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
+ return
+ }
+ }
+ _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
+}
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+ if tr.err != nil {
+ return false
+ }
+
+ given := tr.octal(header[148:156])
+ unsigned, signed := checksum(header)
+ return given == unsigned || given == signed
+}
+
+func (tr *Reader) readHeader() *Header {
+ header := tr.hdrBuff[:]
+ copy(header, zeroBlock)
+
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+
+ // Two blocks of zero bytes mark the end of the archive.
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ tr.err = io.EOF
+ } else {
+ tr.err = ErrHeader // zero block and then non-zero block
+ }
+ return nil
+ }
+
+ if !tr.verifyChecksum(header) {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Unpack
+ hdr := new(Header)
+ s := slicer(header)
+
+ hdr.Name = cString(s.next(100))
+ hdr.Mode = tr.octal(s.next(8))
+ hdr.Uid = int(tr.octal(s.next(8)))
+ hdr.Gid = int(tr.octal(s.next(8)))
+ hdr.Size = tr.octal(s.next(12))
+ hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
+ s.next(8) // chksum
+ hdr.Typeflag = s.next(1)[0]
+ hdr.Linkname = cString(s.next(100))
+
+ // The remainder of the header depends on the value of magic.
+ // The original (v7) version of tar had no explicit magic field,
+ // so its magic bytes, like the rest of the block, are NULs.
+ magic := string(s.next(8)) // contains version field as well.
+ var format string
+ switch {
+ case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
+ if string(header[508:512]) == "tar\x00" {
+ format = "star"
+ } else {
+ format = "posix"
+ }
+ case magic == "ustar \x00": // old GNU tar
+ format = "gnu"
+ }
+
+ switch format {
+ case "posix", "gnu", "star":
+ hdr.Uname = cString(s.next(32))
+ hdr.Gname = cString(s.next(32))
+ devmajor := s.next(8)
+ devminor := s.next(8)
+ if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+ hdr.Devmajor = tr.octal(devmajor)
+ hdr.Devminor = tr.octal(devminor)
+ }
+ var prefix string
+ switch format {
+ case "posix", "gnu":
+ prefix = cString(s.next(155))
+ case "star":
+ prefix = cString(s.next(131))
+ hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
+ hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Maximum value of hdr.Size is 64 GB (12 octal digits),
+ // so there's no risk of int64 overflowing.
+ nb := int64(hdr.Size)
+ tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+
+ // Set the current file reader.
+ tr.curr = &regFileReader{r: tr.r, nb: nb}
+
+ // Check for old GNU sparse format entry.
+ if hdr.Typeflag == TypeGNUSparse {
+ // Get the real size of the file.
+ hdr.Size = tr.octal(header[483:495])
+
+ // Read the sparse map.
+ sp := tr.readOldGNUSparseMap(header)
+ if tr.err != nil {
+ return nil
+ }
+ // Current file is a GNU sparse file. Update the current file reader.
+ tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
+ }
+
+ return hdr
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+// A sparse entry indicates the offset and size in a sparse file of a
+// block of data.
+type sparseEntry struct {
+ offset int64
+ numBytes int64
+}
+
+// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
+// then one or more extension headers are used to store the rest of the sparse map.
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
+ isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
+ spCap := oldGNUSparseMainHeaderNumEntries
+ if isExtended {
+ spCap += oldGNUSparseExtendedHeaderNumEntries
+ }
+ sp := make([]sparseEntry, 0, spCap)
+ s := slicer(header[oldGNUSparseMainHeaderOffset:])
+
+ // Read the four entries from the main tar header
+ for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
+ offset := tr.octal(s.next(oldGNUSparseOffsetSize))
+ numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ for isExtended {
+ // There are more entries. Read an extension header and parse its entries.
+ sparseHeader := make([]byte, blockSize)
+ if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
+ return nil
+ }
+ isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
+ s = slicer(sparseHeader)
+ for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
+ offset := tr.octal(s.next(oldGNUSparseOffsetSize))
+ numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ }
+ return sp
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
+// The sparse map is stored just before the file data and padded out to the nearest block boundary.
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
+ buf := make([]byte, 2*blockSize)
+ sparseHeader := buf[:blockSize]
+
+ // readDecimal is a helper function to read a decimal integer from the sparse map
+ // while making sure to read from the file in blocks of size blockSize
+ readDecimal := func() (int64, error) {
+ // Look for newline
+ nl := bytes.IndexByte(sparseHeader, '\n')
+ if nl == -1 {
+ if len(sparseHeader) >= blockSize {
+ // This is an error
+ return 0, ErrHeader
+ }
+ oldLen := len(sparseHeader)
+ newLen := oldLen + blockSize
+ if cap(sparseHeader) < newLen {
+ // There's more header, but we need to make room for the next block
+ copy(buf, sparseHeader)
+ sparseHeader = buf[:newLen]
+ } else {
+ // There's more header, and we can just reslice
+ sparseHeader = sparseHeader[:newLen]
+ }
+
+ // Now that sparseHeader is large enough, read next block
+ if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
+ return 0, err
+ }
+
+ // Look for a newline in the new data
+ nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
+ if nl == -1 {
+ // This is an error
+ return 0, ErrHeader
+ }
+ nl += oldLen // We want the position from the beginning
+ }
+ // Now that we've found a newline, read a number
+ n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
+ if err != nil {
+ return 0, ErrHeader
+ }
+
+ // Update sparseHeader to consume this number
+ sparseHeader = sparseHeader[nl+1:]
+ return n, nil
+ }
+
+ // Read the first block
+ if _, err := io.ReadFull(r, sparseHeader); err != nil {
+ return nil, err
+ }
+
+ // The first line contains the number of entries
+ numEntries, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ // Read all the entries
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ // Read the offset
+ offset, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+ // Read numBytes
+ numBytes, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ return sp, nil
+}
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
+// The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
+ // Get number of entries
+ numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
+ if !ok {
+ return nil, ErrHeader
+ }
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+
+ sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
+
+ // There should be two numbers in sparseMap for each entry
+ if int64(len(sparseMap)) != 2*numEntries {
+ return nil, ErrHeader
+ }
+
+ // Loop through the entries in the sparse map
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ return sp, nil
+}
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+ if tr.curr == nil {
+ // No current file, so no bytes
+ return 0
+ }
+ return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+ if tr.curr == nil {
+ return 0, io.EOF
+ }
+ n, err = tr.curr.Read(b)
+ if err != nil && err != io.EOF {
+ tr.err = err
+ }
+ return
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+ if rfr.nb == 0 {
+ // file consumed
+ return 0, io.EOF
+ }
+ if int64(len(b)) > rfr.nb {
+ b = b[0:rfr.nb]
+ }
+ n, err = rfr.r.Read(b)
+ rfr.nb -= int64(n)
+
+ if err == io.EOF && rfr.nb > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// numBytes returns the number of bytes left to read in the file's data in the tar archive.
+func (rfr *regFileReader) numBytes() int64 {
+ return rfr.nb
+}
+
+// readHole reads a sparse file hole ending at offset toOffset
+func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
+ n64 := toOffset - sfr.pos
+ if n64 > int64(len(b)) {
+ n64 = int64(len(b))
+ }
+ n := int(n64)
+ for i := 0; i < n; i++ {
+ b[i] = 0
+ }
+ sfr.pos += n64
+ return n
+}
+
+// Read reads the sparse file data in expanded form.
+func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
+ if len(sfr.sp) == 0 {
+ // No more data fragments to read from.
+ if sfr.pos < sfr.tot {
+ // We're in the last hole
+ n = sfr.readHole(b, sfr.tot)
+ return
+ }
+ // Otherwise, we're at the end of the file
+ return 0, io.EOF
+ }
+ if sfr.pos < sfr.sp[0].offset {
+ // We're in a hole
+ n = sfr.readHole(b, sfr.sp[0].offset)
+ return
+ }
+
+ // We're not in a hole, so we'll read from the next data fragment
+ posInFragment := sfr.pos - sfr.sp[0].offset
+ bytesLeft := sfr.sp[0].numBytes - posInFragment
+ if int64(len(b)) > bytesLeft {
+ b = b[0:bytesLeft]
+ }
+
+ n, err = sfr.rfr.Read(b)
+ sfr.pos += int64(n)
+
+ if int64(n) == bytesLeft {
+ // We're done with this fragment
+ sfr.sp = sfr.sp[1:]
+ }
+
+ if err == io.EOF && sfr.pos < sfr.tot {
+ // We reached the end of the last fragment's data, but there's a final hole
+ err = nil
+ }
+ return
+}
+
+// numBytes returns the number of bytes left to read in the sparse file's
+// sparse-encoded data in the tar archive.
+func (sfr *sparseFileReader) numBytes() int64 {
+ return sfr.rfr.nb
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
new file mode 100644
index 00000000000..9601ffe4597
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type untarTest struct {
+ file string
+ headers []*Header
+ cksums []string
+}
+
+var gnuTarTest = &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244428340, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244436044, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ cksums: []string{
+ "e38b27eaccb4391bdec553a7f3ae6b2f",
+ "c65bd2e50a56a2138bf1716f2fd56fe9",
+ },
+}
+
+var sparseTarTest = &untarTest{
+ file: "testdata/sparse-formats.tar",
+ headers: []*Header{
+ {
+ Name: "sparse-gnu",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392395740, 0),
+ Typeflag: 0x53,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-0.0",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392342187, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-0.1",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392340456, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-1.0",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392337404, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "end",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 4,
+ ModTime: time.Unix(1392398319, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ },
+ cksums: []string{
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "b0061974914468de549a2af8ced10316",
+ },
+}
+
+var untarTests = []*untarTest{
+ gnuTarTest,
+ sparseTarTest,
+ {
+ file: "testdata/star.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ },
+ },
+ {
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ },
+ },
+ {
+ file: "testdata/pax.tar",
+ headers: []*Header{
+ {
+ Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ Mode: 0664,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 7,
+ ModTime: time.Unix(1350244992, 23960108),
+ ChangeTime: time.Unix(1350244992, 23960108),
+ AccessTime: time.Unix(1350244992, 23960108),
+ Typeflag: TypeReg,
+ },
+ {
+ Name: "a/b",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 0,
+ ModTime: time.Unix(1350266320, 910238425),
+ ChangeTime: time.Unix(1350266320, 910238425),
+ AccessTime: time.Unix(1350266320, 910238425),
+ Typeflag: TypeSymlink,
+ Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ },
+ },
+ },
+ {
+ file: "testdata/nil-uid.tar", // golang.org/issue/5290
+ headers: []*Header{
+ {
+ Name: "P1050238.JPG.log",
+ Mode: 0664,
+ Uid: 0,
+ Gid: 0,
+ Size: 14,
+ ModTime: time.Unix(1365454838, 0),
+ Typeflag: TypeReg,
+ Linkname: "",
+ Uname: "eyefi",
+ Gname: "eyefi",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ },
+ },
+ {
+ file: "testdata/xattrs.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 5,
+ ModTime: time.Unix(1386065770, 448252320),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1389782956, 794414986),
+ Xattrs: map[string]string{
+ "user.key": "value",
+ "user.key2": "value2",
+ // Interestingly, selinux encodes the terminating null inside the xattr
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 11,
+ ModTime: time.Unix(1386065770, 449252304),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1386065770, 449252304),
+ Xattrs: map[string]string{
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ },
+ },
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ defer f.Close()
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(*hdr, *header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ continue testLoop
+ }
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
+ }
+ }
+}
+
+func TestPartialRead(t *testing.T) {
+ f, err := os.Open("testdata/gnu.tar")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ // Read the first four bytes; Next() should skip the last byte.
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get first file: %v", err)
+ }
+ buf := make([]byte, 4)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+
+ // Second file
+ hdr, err = tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get second file: %v", err)
+ }
+ buf = make([]byte, 6)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Google"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+}
+
+func TestIncrementalRead(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ headers := test.headers
+ cksums := test.cksums
+ nread := 0
+
+ // loop over all files
+ for ; ; nread++ {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ break
+ }
+
+ // check the header
+ if !reflect.DeepEqual(*hdr, *headers[nread]) {
+ t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
+ *hdr, headers[nread])
+ }
+
+		// read file contents in little chunks until EOF,
+ // checksumming all the way
+ h := md5.New()
+ rdbuf := make([]uint8, 8)
+ for {
+ nr, err := tr.Read(rdbuf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read: unexpected error %v\n", err)
+ break
+ }
+ h.Write(rdbuf[0:nr])
+ }
+ // verify checksum
+ have := fmt.Sprintf("%x", h.Sum(nil))
+ want := cksums[nread]
+ if want != have {
+ t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
+ }
+ }
+ if nread != len(headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
+ }
+}
+
+func TestNonSeekable(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ type readerOnly struct {
+ io.Reader
+ }
+ tr := NewReader(readerOnly{f})
+ nread := 0
+
+ for ; ; nread++ {
+ _, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+
+ if nread != len(test.headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
+ }
+}
+
+func TestParsePAXHeader(t *testing.T) {
+ paxTests := [][3]string{
+ {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
+ {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
+ {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
+ for _, test := range paxTests {
+ key, expected, raw := test[0], test[1], test[2]
+ reader := bytes.NewReader([]byte(raw))
+ headers, err := parsePAX(reader)
+ if err != nil {
+ t.Errorf("Couldn't parse correctly formatted headers: %v", err)
+ continue
+ }
+		if !strings.EqualFold(headers[key], expected) {
+			t.Errorf("header incorrectly parsed: got %s, wanted %s", headers[key], expected)
+ continue
+ }
+ trailer := make([]byte, 100)
+ n, err := reader.Read(trailer)
+ if err != io.EOF || n != 0 {
+ t.Error("Buffer wasn't consumed")
+ }
+ }
+ badHeader := bytes.NewReader([]byte("3 somelongkey="))
+ if _, err := parsePAX(badHeader); err != ErrHeader {
+ t.Fatal("Unexpected success when parsing bad header")
+ }
+}
+
+func TestParsePAXTime(t *testing.T) {
+ // Some valid PAX time values
+ timestamps := map[string]time.Time{
+ "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case
+ "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
+ "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
+ "1350244992": time.Unix(1350244992, 0), // Low precision value
+ }
+ for input, expected := range timestamps {
+ ts, err := parsePAXTime(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ts.Equal(expected) {
+ t.Fatalf("Time parsing failure %s %s", ts, expected)
+ }
+ }
+}
+
+func TestMergePAX(t *testing.T) {
+ hdr := new(Header)
+ // Test a string, integer, and time based value.
+ headers := map[string]string{
+ "path": "a/b/c",
+ "uid": "1000",
+ "mtime": "1350244992.023960108",
+ }
+ err := mergePAX(hdr, headers)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &Header{
+ Name: "a/b/c",
+ Uid: 1000,
+ ModTime: time.Unix(1350244992, 23960108),
+ }
+ if !reflect.DeepEqual(hdr, want) {
+ t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
+ }
+}
+
+func TestSparseEndToEnd(t *testing.T) {
+ test := sparseTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ headers := test.headers
+ cksums := test.cksums
+ nread := 0
+
+ // loop over all files
+ for ; ; nread++ {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ break
+ }
+
+ // check the header
+ if !reflect.DeepEqual(*hdr, *headers[nread]) {
+ t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
+ *hdr, headers[nread])
+ }
+
+ // read and checksum the file data
+ h := md5.New()
+ _, err = io.Copy(h, tr)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ // verify checksum
+ have := fmt.Sprintf("%x", h.Sum(nil))
+ want := cksums[nread]
+ if want != have {
+ t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
+ }
+ }
+ if nread != len(headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
+ }
+}
+
+type sparseFileReadTest struct {
+ sparseData []byte
+ sparseMap []sparseEntry
+ realSize int64
+ expected []byte
+}
+
+var sparseFileReadTests = []sparseFileReadTest{
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 0, numBytes: 2},
+ {offset: 5, numBytes: 3},
+ },
+ realSize: 8,
+ expected: []byte("ab\x00\x00\x00cde"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 0, numBytes: 2},
+ {offset: 5, numBytes: 3},
+ },
+ realSize: 10,
+ expected: []byte("ab\x00\x00\x00cde\x00\x00"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 1, numBytes: 3},
+ {offset: 6, numBytes: 2},
+ },
+ realSize: 8,
+ expected: []byte("\x00abc\x00\x00de"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 1, numBytes: 3},
+ {offset: 6, numBytes: 2},
+ },
+ realSize: 10,
+ expected: []byte("\x00abc\x00\x00de\x00\x00"),
+ },
+ {
+ sparseData: []byte(""),
+ sparseMap: nil,
+ realSize: 2,
+ expected: []byte("\x00\x00"),
+ },
+}
+
+func TestSparseFileReader(t *testing.T) {
+ for i, test := range sparseFileReadTests {
+ r := bytes.NewReader(test.sparseData)
+ nb := int64(r.Len())
+ sfr := &sparseFileReader{
+			rfr: &regFileReader{r: r, nb: nb},
+ sp: test.sparseMap,
+ pos: 0,
+ tot: test.realSize,
+ }
+ if sfr.numBytes() != nb {
+ t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
+ }
+ buf, err := ioutil.ReadAll(sfr)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ }
+ if e := test.expected; !bytes.Equal(buf, e) {
+ t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
+ }
+ if sfr.numBytes() != 0 {
+ t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
+ }
+ }
+}
+
+func TestSparseIncrementalRead(t *testing.T) {
+ sparseMap := []sparseEntry{{10, 2}}
+ sparseData := []byte("Go")
+ expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
+
+ r := bytes.NewReader(sparseData)
+ nb := int64(r.Len())
+ sfr := &sparseFileReader{
+		rfr: &regFileReader{r: r, nb: nb},
+ sp: sparseMap,
+ pos: 0,
+ tot: int64(len(expected)),
+ }
+
+ // We'll read the data 6 bytes at a time, with a hole of size 10 at
+ // the beginning and one of size 8 at the end.
+ var outputBuf bytes.Buffer
+ buf := make([]byte, 6)
+ for {
+ n, err := sfr.Read(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read: unexpected error %v\n", err)
+ }
+ if n > 0 {
+ _, err := outputBuf.Write(buf[:n])
+ if err != nil {
+ t.Errorf("Write: unexpected error %v\n", err)
+ }
+ }
+ }
+ got := outputBuf.String()
+ if got != expected {
+ t.Errorf("Contents = %v, want %v", got, expected)
+ }
+}
+
+func TestReadGNUSparseMap0x1(t *testing.T) {
+ headers := map[string]string{
+ paxGNUSparseNumBlocks: "4",
+ paxGNUSparseMap: "0,5,10,5,20,5,30,5",
+ }
+ expected := []sparseEntry{
+ {offset: 0, numBytes: 5},
+ {offset: 10, numBytes: 5},
+ {offset: 20, numBytes: 5},
+ {offset: 30, numBytes: 5},
+ }
+
+ sp, err := readGNUSparseMap0x1(headers)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if !reflect.DeepEqual(sp, expected) {
+ t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+ }
+}
+
+func TestReadGNUSparseMap1x0(t *testing.T) {
+ // This test uses lots of holes so the sparse header takes up more than two blocks
+ numEntries := 100
+ expected := make([]sparseEntry, 0, numEntries)
+ sparseMap := new(bytes.Buffer)
+
+ fmt.Fprintf(sparseMap, "%d\n", numEntries)
+ for i := 0; i < numEntries; i++ {
+ offset := int64(2048 * i)
+ numBytes := int64(1024)
+ expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
+ fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
+ }
+
+ // Make the header the smallest multiple of blockSize that fits the sparseMap
+ headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
+ bufLen := blockSize * headerBlocks
+ buf := make([]byte, bufLen)
+ copy(buf, sparseMap.Bytes())
+
+	// Get a reader to read the sparse map
+ r := bytes.NewReader(buf)
+
+ // Read the sparse map
+ sp, err := readGNUSparseMap1x0(r)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if !reflect.DeepEqual(sp, expected) {
+ t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+ }
+}
+
+func TestUninitializedRead(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+ _, err = tr.Read([]byte{})
+ if err == nil || err != io.EOF {
+ t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
new file mode 100644
index 00000000000..cf9cc79c591
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
new file mode 100644
index 00000000000..6f17dbe3072
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
new file mode 100644
index 00000000000..cb843db4cfd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+ // TODO(bradfitz): populate username & group. os/user
+ // doesn't cache LookupId lookups, and lacks group
+ // lookup functions.
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+ // TODO(bradfitz): major/minor device numbers?
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
new file mode 100644
index 00000000000..ed333f3ea4f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
@@ -0,0 +1,284 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestFileInfoHeader(t *testing.T) {
+ fi, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "small.txt"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(5); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+ // FileInfoHeader should error when passing nil FileInfo
+ if _, err := FileInfoHeader(nil, ""); err == nil {
+ t.Fatalf("Expected error when passing nil to FileInfoHeader")
+ }
+}
+
+func TestFileInfoHeaderDir(t *testing.T) {
+ fi, err := os.Stat("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "testdata/"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ // Ignoring c_ISGID for golang.org/issue/4867
+ if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(0); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+}
+
+func TestFileInfoHeaderSymlink(t *testing.T) {
+ h, err := FileInfoHeader(symlink{}, "some-target")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := h.Name, "some-symlink"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Linkname, "some-target"; g != e {
+ t.Errorf("Linkname = %q; want %q", g, e)
+ }
+}
+
+type symlink struct{}
+
+func (symlink) Name() string { return "some-symlink" }
+func (symlink) Size() int64 { return 0 }
+func (symlink) Mode() os.FileMode { return os.ModeSymlink }
+func (symlink) ModTime() time.Time { return time.Time{} }
+func (symlink) IsDir() bool { return false }
+func (symlink) Sys() interface{} { return nil }
+
+func TestRoundTrip(t *testing.T) {
+ data := []byte("some file contents")
+
+ var b bytes.Buffer
+ tw := NewWriter(&b)
+ hdr := &Header{
+ Name: "file.txt",
+ Uid: 1 << 21, // too big for 8 octal digits
+ Size: int64(len(data)),
+ ModTime: time.Now(),
+ }
+ // tar only supports second precision.
+ hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("tw.WriteHeader: %v", err)
+ }
+ if _, err := tw.Write(data); err != nil {
+ t.Fatalf("tw.Write: %v", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("tw.Close: %v", err)
+ }
+
+ // Read it back.
+ tr := NewReader(&b)
+ rHdr, err := tr.Next()
+ if err != nil {
+ t.Fatalf("tr.Next: %v", err)
+ }
+ if !reflect.DeepEqual(rHdr, hdr) {
+ t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
+ }
+ rData, err := ioutil.ReadAll(tr)
+ if err != nil {
+ t.Fatalf("Read: %v", err)
+ }
+ if !bytes.Equal(rData, data) {
+ t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
+ }
+}
+
+type headerRoundTripTest struct {
+ h *Header
+ fm os.FileMode
+}
+
+func TestHeaderRoundTrip(t *testing.T) {
+ golden := []headerRoundTripTest{
+ // regular file.
+ {
+ h: &Header{
+ Name: "test.txt",
+ Mode: 0644 | c_ISREG,
+ Size: 12,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0644,
+ },
+ // hard link.
+ {
+ h: &Header{
+ Name: "hard.txt",
+ Mode: 0644 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeLink,
+ },
+ fm: 0644 | os.ModeSymlink,
+ },
+ // symbolic link.
+ {
+ h: &Header{
+ Name: "link.txt",
+ Mode: 0777 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600852, 0),
+ Typeflag: TypeSymlink,
+ },
+ fm: 0777 | os.ModeSymlink,
+ },
+ // character device node.
+ {
+ h: &Header{
+ Name: "dev/null",
+ Mode: 0666 | c_ISCHR,
+ Size: 0,
+ ModTime: time.Unix(1360578951, 0),
+ Typeflag: TypeChar,
+ },
+ fm: 0666 | os.ModeDevice | os.ModeCharDevice,
+ },
+ // block device node.
+ {
+ h: &Header{
+ Name: "dev/sda",
+ Mode: 0660 | c_ISBLK,
+ Size: 0,
+ ModTime: time.Unix(1360578954, 0),
+ Typeflag: TypeBlock,
+ },
+ fm: 0660 | os.ModeDevice,
+ },
+ // directory.
+ {
+ h: &Header{
+ Name: "dir/",
+ Mode: 0755 | c_ISDIR,
+ Size: 0,
+ ModTime: time.Unix(1360601116, 0),
+ Typeflag: TypeDir,
+ },
+ fm: 0755 | os.ModeDir,
+ },
+ // fifo node.
+ {
+ h: &Header{
+ Name: "dev/initctl",
+ Mode: 0600 | c_ISFIFO,
+ Size: 0,
+ ModTime: time.Unix(1360578949, 0),
+ Typeflag: TypeFifo,
+ },
+ fm: 0600 | os.ModeNamedPipe,
+ },
+ // setuid.
+ {
+ h: &Header{
+ Name: "bin/su",
+ Mode: 0755 | c_ISREG | c_ISUID,
+ Size: 23232,
+ ModTime: time.Unix(1355405093, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0755 | os.ModeSetuid,
+ },
+ // setguid.
+ {
+ h: &Header{
+ Name: "group.txt",
+ Mode: 0750 | c_ISREG | c_ISGID,
+ Size: 0,
+ ModTime: time.Unix(1360602346, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0750 | os.ModeSetgid,
+ },
+ // sticky.
+ {
+ h: &Header{
+ Name: "sticky.txt",
+ Mode: 0600 | c_ISREG | c_ISVTX,
+ Size: 7,
+ ModTime: time.Unix(1360602540, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0600 | os.ModeSticky,
+ },
+ }
+
+ for i, g := range golden {
+ fi := g.h.FileInfo()
+ h2, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if strings.Contains(fi.Name(), "/") {
+ t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
+ }
+ name := path.Base(g.h.Name)
+ if fi.IsDir() {
+ name += "/"
+ }
+ if got, want := h2.Name, name; got != want {
+ t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Size, g.h.Size; got != want {
+ t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Mode, g.h.Mode; got != want {
+ t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := fi.Mode(), g.fm; got != want {
+ t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := h2.ModTime, g.h.ModTime; got != want {
+ t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
+ }
+ if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
+ t.Errorf("i=%d: Sys didn't return original *Header", i)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
new file mode 100644
index 00000000000..fc899dc8dc2
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
new file mode 100644
index 00000000000..cc9cfaa33cc
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
new file mode 100644
index 00000000000..9bc24b6587d
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
new file mode 100644
index 00000000000..b249bfc518a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
@@ -0,0 +1 @@
+Kilts
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
new file mode 100644
index 00000000000..394ee3ecd0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
@@ -0,0 +1 @@
+Google.com
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
new file mode 100644
index 00000000000..8bd4e74d50f
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
new file mode 100644
index 00000000000..59e2d4e6046
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
new file mode 100644
index 00000000000..29679d9a305
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
new file mode 100644
index 00000000000..eb65fc94107
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
new file mode 100644
index 00000000000..5960ee82478
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
new file mode 100644
index 00000000000..753e883cebf
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
new file mode 100644
index 00000000000..e6d816ad077
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
new file mode 100644
index 00000000000..9701950edd1
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
new file mode 100644
index 00000000000..dafb2cabf37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -0,0 +1,396 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errNameTooLong = errors.New("archive/tar: name too long")
+ errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
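+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	var buf bytes.Buffer
+//	tw := NewWriter(&buf)
+//	tw.WriteHeader(&Header{Name: "hello.txt", Mode: 0644, Size: 5})
+//	io.WriteString(tw, "hello")
+//	tw.Close()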
+type Writer struct {
+ w io.Writer
+ err error
+ nb int64 // number of unwritten bytes for current file entry
+ pad int64 // amount of padding to write after current file entry
+ closed bool
+ usedBinary bool // whether the binary numeric field extension was used
+ preferPax bool // use pax header instead of binary numeric header
+ hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
+ paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+ if tw.nb > 0 {
+ tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+ return tw.err
+ }
+
+ n := tw.nb + tw.pad
+ for n > 0 && tw.err == nil {
+ nr := n
+ if nr > blockSize {
+ nr = blockSize
+ }
+ var nw int
+ nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+ n -= int64(nw)
+ }
+ tw.nb = 0
+ tw.pad = 0
+ return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+// If the value is too long for the field and allowPax is true, add a PAX header record instead.
+func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
+ if needsPaxHeader {
+ paxHeaders[paxKeyword] = s
+ return
+ }
+ if len(s) > len(b) {
+ if tw.err == nil {
+ tw.err = ErrFieldTooLong
+ }
+ return
+ }
+ ascii := toASCII(s)
+ copy(b, ascii)
+ if len(ascii) < len(b) {
+ b[len(ascii)] = 0
+ }
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (tw *Writer) octal(b []byte, x int64) {
+ s := strconv.FormatInt(x, 8)
+ // leading zeros, but leave room for a NUL.
+ for len(s)+1 < len(b) {
+ s = "0" + s
+ }
+ tw.cString(b, s, false, paxNone, nil)
+}
+
+// Write x into b, either as octal or as binary (GNUtar/star extension).
+// If the value is too long for the field, a PAX header record is used when allowPax and tw.preferPax are set; otherwise it falls back to the binary (base-256) form.
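+// For example, 1<<33 exceeds the 11 octal digits that fit in a 12-byte field
+// (8^11-1), so without PAX it is stored big-endian with the top bit of the
+// first byte set.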
+func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ // Try octal first.
+ s := strconv.FormatInt(x, 8)
+ if len(s) < len(b) {
+ tw.octal(b, x)
+ return
+ }
+
+ // If it is too long for octal, and pax is preferred, use a pax header
+ if allowPax && tw.preferPax {
+ tw.octal(b, 0)
+ s := strconv.FormatInt(x, 10)
+ paxHeaders[paxKeyword] = s
+ return
+ }
+
+ // Too big: use binary (big-endian).
+ tw.usedBinary = true
+ for i := len(b) - 1; x > 0 && i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // highest bit indicates binary format
+}
+
+var (
+ minTime = time.Unix(0, 0)
+ // There is room for 11 octal digits (33 bits) of mtime.
+ maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ return tw.writeHeader(hdr, true)
+}
+
+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// writePAXHeader calls this method with allowPax set to false to
+// suppress recursively writing another PAX header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+ if tw.closed {
+ return ErrWriteAfterClose
+ }
+ if tw.err == nil {
+ tw.Flush()
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // a map to hold pax header records, if any are needed
+ paxHeaders := make(map[string]string)
+
+ // TODO(shanemhansen): we might want to use PAX headers for
+ // subsecond time resolution, but for now let's just capture
+ // too long fields or non ascii characters
+
+ var header []byte
+
+ // We need to select which scratch buffer to use carefully,
+ // since this method is called recursively to write PAX headers.
+ // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+ // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+ // already being used by the non-recursive call, so we must use paxHdrBuff.
+ header = tw.hdrBuff[:]
+ if !allowPax {
+ header = tw.paxHdrBuff[:]
+ }
+ copy(header, zeroBlock)
+ s := slicer(header)
+
+	// keep a reference to the filename so we can overwrite it later if we detect that we can use ustar long names instead of pax
+ pathHeaderBytes := s.next(fileNameSize)
+
+ tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
+
+ // Handle out of range ModTime carefully.
+ var modTime int64
+ if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
+ modTime = hdr.ModTime.Unix()
+ }
+
+ tw.octal(s.next(8), hdr.Mode) // 100:108
+ tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
+ tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
+ tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
+ tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
+ s.next(8) // chksum (148:156)
+ s.next(1)[0] = hdr.Typeflag // 156:157
+
+ tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
+
+ copy(s.next(8), []byte("ustar\x0000")) // 257:265
+ tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
+ tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
+ tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
+ tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
+
+	// keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar long names instead of pax
+ prefixHeaderBytes := s.next(155)
+ tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
+
+ // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary {
+ copy(header[257:265], []byte("ustar \x00"))
+ }
+
+ _, paxPathUsed := paxHeaders[paxPath]
+ // try to use a ustar header when only the name is too long
+ if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+ suffix := hdr.Name
+ prefix := ""
+ if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
+ var err error
+ prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
+ if err == nil {
+ // ok we can use a ustar long name instead of pax, now correct the fields
+
+ // remove the path field from the pax header. this will suppress the pax header
+ delete(paxHeaders, paxPath)
+
+ // update the path fields
+ tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
+ tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
+
+ // Use the ustar magic if we used ustar long names.
+ if len(prefix) > 0 && !tw.usedBinary {
+ copy(header[257:265], []byte("ustar\x00"))
+ }
+ }
+ }
+ }
+
+ // The chksum field is terminated by a NUL and a space.
+ // This is different from the other octal fields.
+ chksum, _ := checksum(header)
+ tw.octal(header[148:155], chksum)
+ header[155] = ' '
+
+ if tw.err != nil {
+ // problem with header; probably integer too big for a field.
+ return tw.err
+ }
+
+ if allowPax {
+ for k, v := range hdr.Xattrs {
+ paxHeaders[paxXattr+k] = v
+ }
+ }
+
+ if len(paxHeaders) > 0 {
+ if !allowPax {
+ return errInvalidHeader
+ }
+ if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ return err
+ }
+ }
+ tw.nb = int64(hdr.Size)
+ tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+ _, tw.err = tw.w.Write(header)
+ return tw.err
+}
+
+// splitUSTARLongName splits a USTAR long name hdr.Name.
+// name must be < 256 characters. errNameTooLong is returned
+// if hdr.Name can't be split. The splitting heuristic
+// is compatible with GNU tar.
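+// For example, strings.Repeat("longname/", 15)+"file.txt" (143 bytes, as in
+// the ustar writer test) splits at the final '/' into a 134-byte prefix and
+// the suffix "file.txt".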
+func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
+ length := len(name)
+ if length > fileNamePrefixSize+1 {
+ length = fileNamePrefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+ i := strings.LastIndex(name[:length], "/")
+ // nlen contains the resulting length in the name field.
+ // plen contains the resulting length in the prefix field.
+ nlen := len(name) - i - 1
+ plen := i
+ if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+ err = errNameTooLong
+ return
+ }
+ prefix, suffix = name[:i], name[i+1:]
+ return
+}
+
+// writePAXHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+ // Prepare extended header
+ ext := new(Header)
+ ext.Typeflag = TypeXHeader
+ // Setting ModTime is required for reader parsing to
+ // succeed, and seems harmless enough.
+ ext.ModTime = hdr.ModTime
+ // The spec asks that we namespace our pseudo files
+ // with the current pid.
+ pid := os.Getpid()
+ dir, file := path.Split(hdr.Name)
+ fullName := path.Join(dir,
+ fmt.Sprintf("PaxHeaders.%d", pid), file)
+
+ ascii := toASCII(fullName)
+ if len(ascii) > 100 {
+ ascii = ascii[:100]
+ }
+ ext.Name = ascii
+ // Construct the body
+ var buf bytes.Buffer
+
+ for k, v := range paxHeaders {
+ fmt.Fprint(&buf, paxHeader(k+"="+v))
+ }
+
+ ext.Size = int64(len(buf.Bytes()))
+ if err := tw.writeHeader(ext, false); err != nil {
+ return err
+ }
+ if _, err := tw.Write(buf.Bytes()); err != nil {
+ return err
+ }
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// paxHeader formats a single pax record, prefixing it with the appropriate length
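+// For example, paxHeader("a=name") returns "9 a=name\n": the six-byte record
+// plus a space, a trailing newline, and the length digit itself.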
+func paxHeader(msg string) string {
+ const padding = 2 // Extra padding for space and newline
+ size := len(msg) + padding
+ size += len(strconv.Itoa(size))
+ record := fmt.Sprintf("%d %s\n", size, msg)
+ if len(record) != size {
+ // Final adjustment if adding size increased
+ // the number of digits in size
+ size = len(record)
+ record = fmt.Sprintf("%d %s\n", size, msg)
+ }
+ return record
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+ if tw.closed {
+ err = ErrWriteTooLong
+ return
+ }
+ overwrite := false
+ if int64(len(b)) > tw.nb {
+ b = b[0:tw.nb]
+ overwrite = true
+ }
+ n, err = tw.w.Write(b)
+ tw.nb -= int64(n)
+ if err == nil && overwrite {
+ err = ErrWriteTooLong
+ return
+ }
+ tw.err = err
+ return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+ if tw.err != nil || tw.closed {
+ return tw.err
+ }
+ tw.Flush()
+ tw.closed = true
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // trailer: two zero blocks
+ for i := 0; i < 2; i++ {
+ _, tw.err = tw.w.Write(zeroBlock)
+ if tw.err != nil {
+ break
+ }
+ }
+ return tw.err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
new file mode 100644
index 00000000000..5e42e322f9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
@@ -0,0 +1,491 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/iotest"
+ "time"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ // The writer test file was produced with this command:
+ // tar (GNU tar) 1.26
+ // ln -s small.txt link.txt
+ // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
+ {
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1246508266, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ {
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1245217492, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ {
+ header: &Header{
+ Name: "link.txt",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 0,
+ ModTime: time.Unix(1314603082, 0),
+ Typeflag: '2',
+ Linkname: "small.txt",
+ Uname: "strings",
+ Gname: "strings",
+ },
+ // no contents
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ {
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ ModTime: time.Unix(1254699560, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // fake contents
+ contents: strings.Repeat("\x00", 4<<10),
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
+ // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
+ {
+ file: "testdata/writer-big-long.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: strings.Repeat("longname/", 15) + "16gig.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 16 << 30,
+ ModTime: time.Unix(1399583047, 0),
+ Typeflag: '0',
+ Uname: "guillaume",
+ Gname: "guillaume",
+ },
+ // fake contents
+ contents: strings.Repeat("\x00", 4<<10),
+ },
+ },
+ },
+ // This file was produced using gnu tar 1.17
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
+ {
+ file: "testdata/ustar.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: strings.Repeat("longname/", 15) + "file.txt",
+ Mode: 0644,
+ Uid: 0765,
+ Gid: 024,
+ Size: 06,
+ ModTime: time.Unix(1360135598, 0),
+ Typeflag: '0',
+ Uname: "shane",
+ Gname: "staff",
+ },
+ contents: "hello\n",
+ },
+ },
+ },
+}
+
+// Render a byte slice for easy visual inspection: letters and digits are shown
+// literally, all other bytes as two-character hexadecimal.
+func bytestr(offset int, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("%04x ", offset)
+ for _, ch := range b {
+ switch {
+ case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
+ s += fmt.Sprintf(" %c", ch)
+ default:
+ s += fmt.Sprintf(" %02x", ch)
+ }
+ }
+ return s
+}
+
+// Render a pseudo-diff between two blocks of bytes.
+func bytediff(a []byte, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
+ for offset := 0; len(a)+len(b) > 0; offset += rowLen {
+ na, nb := rowLen, rowLen
+ if na > len(a) {
+ na = len(a)
+ }
+ if nb > len(b) {
+ nb = len(b)
+ }
+ sa := bytestr(offset, a[0:na])
+ sb := bytestr(offset, b[0:nb])
+ if sa != sb {
+ s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
+ }
+ a = a[na:]
+ b = b[nb:]
+ }
+ return s
+}
+
+func TestWriter(t *testing.T) {
+testLoop:
+ for i, test := range writerTests {
+ expected, err := ioutil.ReadFile(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+
+ buf := new(bytes.Buffer)
+ tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
+ big := false
+ for j, entry := range test.entries {
+ big = big || entry.header.Size > 1<<10
+ if err := tw.WriteHeader(entry.header); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
+ continue testLoop
+ }
+ if _, err := io.WriteString(tw, entry.contents); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
+ continue testLoop
+ }
+ }
+ // Only interested in Close failures for the small tests.
+ if err := tw.Close(); err != nil && !big {
+ t.Errorf("test %d: Failed closing archive: %v", i, err)
+ continue testLoop
+ }
+
+ actual := buf.Bytes()
+ if !bytes.Equal(expected, actual) {
+ t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
+ i, bytediff(expected, actual))
+ }
+ if testing.Short() { // The second test is expensive.
+ break
+ }
+ }
+}
+
+func TestPax(t *testing.T) {
+ // Create an archive with a large name
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ // Force a PAX long name to be written
+ longName := strings.Repeat("ab", 100)
+ contents := strings.Repeat(" ", int(hdr.Size))
+ hdr.Name = longName
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long file name")
+ }
+}
+
+func TestPaxSymlink(t *testing.T) {
+ // Create an archive with a large linkname
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ hdr.Typeflag = TypeSymlink
+ // Force a PAX long linkname to be written: the 210-character linkname
+ // exceeds the 100-byte USTAR linkname field, which has no prefix extension.
+ longLinkname := strings.Repeat("1234567890/1234567890", 10)
+ hdr.Linkname = longLinkname
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get the long linkname back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Linkname != longLinkname {
+ t.Fatal("Couldn't recover long link name")
+ }
+}
+
+func TestPaxNonAscii(t *testing.T) {
+ // Create an archive with non-ASCII names. These should trigger PAX
+ // headers, because PAX headers have a defined UTF-8 encoding.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat:1 %v", err)
+ }
+
+ // some sample data
+ chineseFilename := "文件名"
+ chineseGroupname := "組"
+ chineseUsername := "用戶名"
+
+ hdr.Name = chineseFilename
+ hdr.Gname = chineseGroupname
+ hdr.Uname = chineseUsername
+
+ contents := strings.Repeat(" ", int(hdr.Size))
+
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get the non-ASCII names back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != chineseFilename {
+ t.Fatal("Couldn't recover unicode name")
+ }
+ if hdr.Gname != chineseGroupname {
+ t.Fatal("Couldn't recover unicode group")
+ }
+ if hdr.Uname != chineseUsername {
+ t.Fatal("Couldn't recover unicode user")
+ }
+}
+
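+// Xattrs travel in the PAX extended header as "SCHILY.xattr."-prefixed
+// key=value records, so they should survive a write/read round trip intact.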
+func TestPaxXattrs(t *testing.T) {
+ xattrs := map[string]string{
+ "user.key": "value",
+ }
+
+ // Create an archive with an xattr
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ contents := "Kilts"
+ hdr.Xattrs = xattrs
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get the xattrs back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
+ t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
+ hdr.Xattrs, xattrs)
+ }
+}
+
+func TestPAXHeader(t *testing.T) {
+ medName := strings.Repeat("CD", 50)
+ longName := strings.Repeat("AB", 100)
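+ // Each PAX record has the form "%d key=value\n", where the leading decimal
+ // length counts every byte of the record, including its own digits. The
+ // length is therefore self-referential: "a=names" needs length 11
+ // ("11 a=names\n"), because the second digit of the length adds a byte
+ // that a naive count of 10 would miss (the "carry" case below).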
+ paxTests := [][2]string{
+ {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
+ {"a=b", "6 a=b\n"}, // Single digit length
+ {"a=names", "11 a=names\n"}, // Test case involving carries
+ {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
+ {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
+
+ for _, test := range paxTests {
+ key, expected := test[0], test[1]
+ if result := paxHeader(key); result != expected {
+ t.Fatalf("paxHeader: got %s, expected %s", result, expected)
+ }
+ }
+}
+
+func TestUSTARLongName(t *testing.T) {
+ // Create an archive with a path that failed to split with the USTAR prefix extension in previous versions.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ hdr.Typeflag = TypeDir
+ // Force a PAX long name to be written. The name was taken from a real-world
+ // example that failed, with each character replaced by a digit to anonymize
+ // the sample.
+ longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
+ hdr.Name = longName
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long name")
+ }
+}
+
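+// A Typeflag of 0 is the old (pre-USTAR) encoding of a regular file and is
+// still valid. The PAX extended-header entry written for the long name uses
+// typeflag 'x', which must not leak into the real header that follows it.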
+func TestValidTypeflagWithPAXHeader(t *testing.T) {
+ var buffer bytes.Buffer
+ tw := NewWriter(&buffer)
+
+ fileName := strings.Repeat("ab", 100)
+
+ hdr := &Header{
+ Name: fileName,
+ Size: 4,
+ Typeflag: 0,
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("Failed to write header: %s", err)
+ }
+ if _, err := tw.Write([]byte("fooo")); err != nil {
+ t.Fatalf("Failed to write the file's data: %s", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("Failed to close archive: %s", err)
+ }
+
+ tr := NewReader(&buffer)
+
+ for {
+ header, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Failed to read header: %s", err)
+ }
+ if header.Typeflag != 0 {
+ t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
+ }
+ }
+}