diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ef2bce4bde4..8fd17e6388f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -19,6 +19,11 @@ "Comment": "null-12", "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9" }, + { + "ImportPath": "code.google.com/p/go.exp/inotify", + "Comment": "null-75", + "Rev": "bd8df7009305d6ada223ea3c95b94c0f38bfa119" + }, { "ImportPath": "code.google.com/p/go.net/spdy", "Comment": "null-240", @@ -58,6 +63,11 @@ "Comment": "v0.2.0-rc1-120-g23142f6", "Rev": "23142f6773a676cc2cae8dd0cb90b2ea761c853f" }, + { + "ImportPath": "github.com/coreos/go-systemd/dbus", + "Comment": "v2-27-g97e243d", + "Rev": "97e243d21a8e232e9d8af38ba2366dfcfceebeba" + }, { "ImportPath": "github.com/cpuguy83/go-md2man/mangen", "Comment": "v1.0.2-5-g2831f11", @@ -82,6 +92,11 @@ "Comment": "v1.4.1-656-g2115131", "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" }, + { + "ImportPath": "github.com/docker/docker/pkg/mount", + "Comment": "v1.4.1-656-g2115131", + "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" + }, { "ImportPath": "github.com/docker/docker/pkg/pools", "Comment": "v1.4.1-656-g2115131", @@ -92,6 +107,11 @@ "Comment": "v1.4.1-656-g2115131", "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" }, + { + "ImportPath": "github.com/docker/docker/pkg/symlink", + "Comment": "v1.4.1-656-g2115131", + "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" + }, { "ImportPath": "github.com/docker/docker/pkg/system", "Comment": "v1.4.1-656-g2115131", @@ -112,6 +132,11 @@ "Comment": "v1.4.1-656-g2115131", "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf" }, + { + "ImportPath": "github.com/docker/libcontainer", + "Comment": "v1.4.0-52-gd7dea0e", + "Rev": "d7dea0e925315bab640115053204c16718839b1e" + }, { "ImportPath": "github.com/docker/spdystream", "Rev": "e731c8f9f19ffd7e51a469a2de1580c1dfbb4fae" @@ -137,6 +162,11 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "588cb435e59ee8b6c2795482887755841ad67207" }, + { + "ImportPath": "github.com/godbus/dbus", + "Comment": "0-7-g939230d", + "Rev": "939230d2086a4f1870e04c52e0a376c25bae0ec4" + }, { "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" @@ -151,13 +181,58 @@ }, { "ImportPath": "github.com/google/cadvisor/client", - "Comment": "0.10.1-30-gb5e2f37", - "Rev": "b5e2f3788e4a39a0836c5490e6bf31832400c1f3" + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/container", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/events", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/fs", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "0.10.1-30-gb5e2f37", - "Rev": "b5e2f3788e4a39a0836c5490e6bf31832400c1f3" + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/info/v2", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/manager", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/storage", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, 
+ { + "ImportPath": "github.com/google/cadvisor/summary", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/utils", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" + }, + { + "ImportPath": "github.com/google/cadvisor/version", + "Comment": "0.10.1-36-g62a1788", + "Rev": "62a1788621f4adee2dbf08c26060ed7fb8c0297d" }, { "ImportPath": "github.com/google/gofuzz", diff --git a/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux.go b/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux.go new file mode 100644 index 00000000000..f671f47a130 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux.go @@ -0,0 +1,300 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package inotify implements a wrapper for the Linux inotify system. + +Example: + watcher, err := inotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + err = watcher.Watch("/tmp") + if err != nil { + log.Fatal(err) + } + for { + select { + case ev := <-watcher.Event: + log.Println("event:", ev) + case err := <-watcher.Error: + log.Println("error:", err) + } + } + +*/ +package inotify + +import ( + "errors" + "fmt" + "os" + "strings" + "sync" + "syscall" + "unsafe" +) + +type Event struct { + Mask uint32 // Mask of events + Cookie uint32 // Unique cookie associating related events (for rename(2)) + Name string // File name (optional) +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +type Watcher struct { + mu sync.Mutex + fd int // File descriptor (as returned by the inotify_init() syscall) + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + Error chan error // Errors are sent on this channel + Event chan *Event // Events are returned on this channel + done chan bool // Channel for sending a "quit message" to the reader goroutine + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates and returns a new inotify instance using inotify_init(2) +func NewWatcher() (*Watcher, error) { + fd, errno := syscall.InotifyInit() + if fd == -1 { + return nil, os.NewSyscallError("inotify_init", errno) + } + w := &Watcher{ + fd: fd, + watches: make(map[string]*watch), + paths: make(map[int]string), + Event: make(chan *Event), + Error: make(chan error), + done: make(chan bool, 1), + } + + go w.readEvents() + return w, nil +} + +// Close closes an inotify watcher instance +// It sends a message to the reader goroutine to quit and removes all watches +// associated with the inotify instance +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + w.done <- true + for path := range w.watches { + w.RemoveWatch(path) + } + + return nil +} + +// AddWatch adds path to the watched file set. +// The flags are interpreted as described in inotify_add_watch(2). 
+func (w *Watcher) AddWatch(path string, flags uint32) error { + if w.isClosed { + return errors.New("inotify instance already closed") + } + + watchEntry, found := w.watches[path] + if found { + watchEntry.flags |= flags + flags |= syscall.IN_MASK_ADD + } + + w.mu.Lock() // synchronize with readEvents goroutine + + wd, err := syscall.InotifyAddWatch(w.fd, path, flags) + if err != nil { + w.mu.Unlock() + return &os.PathError{ + Op: "inotify_add_watch", + Path: path, + Err: err, + } + } + + if !found { + w.watches[path] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = path + } + w.mu.Unlock() + return nil +} + +// Watch adds path to the watched file set, watching all events. +func (w *Watcher) Watch(path string) error { + return w.AddWatch(path, IN_ALL_EVENTS) +} + +// RemoveWatch removes path from the watched file set. +func (w *Watcher) RemoveWatch(path string) error { + watch, ok := w.watches[path] + if !ok { + return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path)) + } + success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + return os.NewSyscallError("inotify_rm_watch", errno) + } + delete(w.watches, path) + return nil +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Event channel +func (w *Watcher) readEvents() { + var buf [syscall.SizeofInotifyEvent * 4096]byte + + for { + n, err := syscall.Read(w.fd, buf[:]) + // See if there is a message on the "done" channel + var done bool + select { + case done = <-w.done: + default: + } + + // If EOF or a "done" message is received + if n == 0 || done { + // The syscall.Close can be slow. Close + // w.Event first. + close(w.Event) + err := syscall.Close(w.fd) + if err != nil { + w.Error <- os.NewSyscallError("close", err) + } + close(w.Error) + return + } + if n < 0 { + w.Error <- os.NewSyscallError("read", err) + continue + } + if n < syscall.SizeofInotifyEvent { + w.Error <- errors.New("inotify: short read in readEvents()") + continue + } + + var offset uint32 = 0 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-syscall.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) + event := new(Event) + event.Mask = uint32(raw.Mask) + event.Cookie = uint32(raw.Cookie) + nameLen := uint32(raw.Len) + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + event.Name = w.paths[int(raw.Wd)] + w.mu.Unlock() + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) + // The filename is padded with NUL bytes. TrimRight() gets rid of those. + event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + // Send the event on the events channel + w.Event <- event + + // Move to the next event in the buffer + offset += syscall.SizeofInotifyEvent + nameLen + } + } +} + +// String formats the event e in the form +// "filename: 0xEventMask = IN_ACCESS|IN_ATTRIB_|..." 
+func (e *Event) String() string { + var events string = "" + + m := e.Mask + for _, b := range eventBits { + if m&b.Value != 0 { + m &^= b.Value + events += "|" + b.Name + } + } + + if m != 0 { + events += fmt.Sprintf("|%#x", m) + } + if len(events) > 0 { + events = " == " + events[1:] + } + + return fmt.Sprintf("%q: %#x%s", e.Name, e.Mask, events) +} + +const ( + // Options for inotify_init() are not exported + // IN_CLOEXEC uint32 = syscall.IN_CLOEXEC + // IN_NONBLOCK uint32 = syscall.IN_NONBLOCK + + // Options for AddWatch + IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW + IN_ONESHOT uint32 = syscall.IN_ONESHOT + IN_ONLYDIR uint32 = syscall.IN_ONLYDIR + + // The "IN_MASK_ADD" option is not exported, as AddWatch + // adds it automatically, if there is already a watch for the given path + // IN_MASK_ADD uint32 = syscall.IN_MASK_ADD + + // Events + IN_ACCESS uint32 = syscall.IN_ACCESS + IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS + IN_ATTRIB uint32 = syscall.IN_ATTRIB + IN_CLOSE uint32 = syscall.IN_CLOSE + IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE + IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE + IN_CREATE uint32 = syscall.IN_CREATE + IN_DELETE uint32 = syscall.IN_DELETE + IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF + IN_MODIFY uint32 = syscall.IN_MODIFY + IN_MOVE uint32 = syscall.IN_MOVE + IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM + IN_MOVED_TO uint32 = syscall.IN_MOVED_TO + IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF + IN_OPEN uint32 = syscall.IN_OPEN + + // Special events + IN_ISDIR uint32 = syscall.IN_ISDIR + IN_IGNORED uint32 = syscall.IN_IGNORED + IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW + IN_UNMOUNT uint32 = syscall.IN_UNMOUNT +) + +var eventBits = []struct { + Value uint32 + Name string +}{ + {IN_ACCESS, "IN_ACCESS"}, + {IN_ATTRIB, "IN_ATTRIB"}, + {IN_CLOSE, "IN_CLOSE"}, + {IN_CLOSE_NOWRITE, "IN_CLOSE_NOWRITE"}, + {IN_CLOSE_WRITE, "IN_CLOSE_WRITE"}, + {IN_CREATE, "IN_CREATE"}, + {IN_DELETE, "IN_DELETE"}, + {IN_DELETE_SELF, "IN_DELETE_SELF"}, + {IN_MODIFY, "IN_MODIFY"}, + {IN_MOVE, "IN_MOVE"}, + {IN_MOVED_FROM, "IN_MOVED_FROM"}, + {IN_MOVED_TO, "IN_MOVED_TO"}, + {IN_MOVE_SELF, "IN_MOVE_SELF"}, + {IN_OPEN, "IN_OPEN"}, + {IN_ISDIR, "IN_ISDIR"}, + {IN_IGNORED, "IN_IGNORED"}, + {IN_Q_OVERFLOW, "IN_Q_OVERFLOW"}, + {IN_UNMOUNT, "IN_UNMOUNT"}, +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux_test.go b/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux_test.go new file mode 100644 index 00000000000..1685b772ec1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.exp/inotify/inotify_linux_test.go @@ -0,0 +1,107 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package inotify + +import ( + "io/ioutil" + "os" + "sync/atomic" + "testing" + "time" +) + +func TestInotifyEvents(t *testing.T) { + // Create an inotify watcher instance and initialize it + watcher, err := NewWatcher() + if err != nil { + t.Fatalf("NewWatcher failed: %s", err) + } + + dir, err := ioutil.TempDir("", "inotify") + if err != nil { + t.Fatalf("TempDir failed: %s", err) + } + defer os.RemoveAll(dir) + + // Add a watch for "_test" + err = watcher.Watch(dir) + if err != nil { + t.Fatalf("Watch failed: %s", err) + } + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Error { + t.Fatalf("error received: %s", err) + } + }() + + testFile := dir + "/TestInotifyEvents.testfile" + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Event + var eventsReceived int32 = 0 + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == testFile { + atomic.AddInt32(&eventsReceived, 1) + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the inotify event queue + _, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 1 s to be sure + time.Sleep(1 * time.Second) + if atomic.AddInt32(&eventsReceived, 0) == 0 { + t.Fatal("inotify event hasn't been received after 1 second") + } + + // Try closing the inotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(1 * time.Second): + t.Fatal("event stream was not closed after 1 second") + } +} + +func TestInotifyClose(t *testing.T) { + watcher, _ := NewWatcher() + watcher.Close() + + done := make(chan bool) + go func() { + watcher.Close() + done <- true + }() + + select { + case <-done: + case <-time.After(50 * time.Millisecond): + t.Fatal("double Close() test failed: second Close() call didn't return") + } + + err := watcher.Watch(os.TempDir()) + if err == nil { + t.Fatal("expected error on Watch() after Close(), got nil") + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 00000000000..9bdc80c725c --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,128 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Integration with the systemd D-Bus API. 
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + sysconn *dbus.Conn + sysobj *dbus.Object + jobListener struct { + jobs map[dbus.ObjectPath]chan string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + dispatch map[string]func(dbus.Signal) +} + +// New() establishes a connection to the system bus and authenticates. +func New() (*Conn, error) { + c := new(Conn) + + if err := c.initConnection(); err != nil { + return nil, err + } + + c.initJobs() + return c, nil +} + +func (c *Conn) initConnection() error { + var err error + c.sysconn, err = dbus.SystemBusPrivate() + if err != nil { + return err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = c.sysconn.Auth(methods) + if err != nil { + c.sysconn.Close() + return err + } + + err = c.sysconn.Hello() + if err != nil { + c.sysconn.Close() + return err + } + + c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) + + // Setup the listeners on jobs so that we can get completions + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + c.initSubscription() + c.initDispatch() + + return nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go new file mode 100644 index 00000000000..4cfbe8296c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dbus + +import ( + "testing" +) + +func TestNeedsEscape(t *testing.T) { + // Anything not 0-9a-zA-Z should always be escaped + for want, vals := range map[bool][]byte{ + false: []byte{'a', 'b', 'z', 'A', 'Q', '1', '4', '9'}, + true: []byte{'#', '%', '$', '!', '.', '_', '-', '%', '\\'}, + } { + for i := 1; i < 10; i++ { + for _, b := range vals { + got := needsEscape(i, b) + if got != want { + t.Errorf("needsEscape(%d, %c) returned %t, want %t", i, b, got, want) + } + } + } + } + + // 0-9 in position 0 should be escaped + for want, vals := range map[bool][]byte{ + false: []byte{'A', 'a', 'e', 'x', 'Q', 'Z'}, + true: []byte{'0', '4', '5', '9'}, + } { + for _, b := range vals { + got := needsEscape(0, b) + if got != want { + t.Errorf("needsEscape(0, %c) returned %t, want %t", b, got, want) + } + } + } + +} + +func TestPathBusEscape(t *testing.T) { + for in, want := range map[string]string{ + "": "_", + "foo.service": "foo_2eservice", + "foobar": "foobar", + "woof@woof.service": "woof_40woof_2eservice", + "0123456": "_30123456", + "account_db.service": "account_5fdb_2eservice", + "got-dashes": "got_2ddashes", + } { + got := PathBusEscape(in) + if got != want { + t.Errorf("bad result for PathBusEscape(%s): got %q, want %q", in, got, want) + } + } + +} + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 00000000000..3d9bff7a864 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,406 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "errors" + + "github.com/godbus/dbus" +) + +func (c *Conn) initJobs() { + c.jobListener.jobs = make(map[dbus.ObjectPath]chan string) +} + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) { + c.jobListener.Lock() + defer c.jobListener.Unlock() + + ch := make(chan string, 1) + var path dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&path) + if err != nil { + return nil, err + } + c.jobListener.jobs[path] = ch + return ch, nil +} + +func (c *Conn) runJob(job string, args ...interface{}) (string, error) { + respCh, err := c.startJob(job, args...) + if err != nil { + return "", err + } + return <-respCh, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. 
The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// Result string: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +func (c *Conn) StartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. 
+func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the unit name and returns all of its dbus object properties. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. 
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go new file mode 100644 index 00000000000..8c7ab93eb36 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -0,0 +1,332 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/godbus/dbus" +) + +func setupConn(t *testing.T) *Conn { + conn, err := New() + if err != nil { + t.Fatal(err) + } + + return conn +} + +func findFixture(target string, t *testing.T) string { + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + return abs +} + +func setupUnit(target string, conn *Conn, t *testing.T) { + // Blindly stop the unit in case it is running + conn.StopUnit(target, "replace") + + // Blindly remove the symlink in case it exists + targetRun := filepath.Join("/run/systemd/system/", target) + os.Remove(targetRun) +} + +func linkUnit(target string, conn *Conn, t *testing.T) { + abs := findFixture(target, t) + fixture := []string{abs} + + changes, err := conn.LinkUnitFiles(fixture, true, true) + if err != nil { + t.Fatal(err) + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + runPath := filepath.Join("/run/systemd/system/", target) + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } +} + +// Ensure that basic unit starting and stopping works. +func TestStartStopUnit(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + // 2. Start the unit + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done:", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. 
Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +// Enables a unit and then immediately tears it down +func TestEnableDisableUnit(t *testing.T) { + target := "enable-disable.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + abs := findFixture(target, t) + runPath := filepath.Join("/run/systemd/system/", target) + + // 1. Enable the unit + install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true) + if err != nil { + t.Fatal(err) + } + + if install != false { + t.Fatal("Install was true") + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } + + // 2. Disable the unit + dChanges, err := conn.DisableUnitFiles([]string{abs}, true) + if err != nil { + t.Fatal(err) + } + + if len(dChanges) != 1 { + t.Fatalf("Changes should include the path, %v", dChanges) + } + if dChanges[0].Filename != runPath { + t.Fatalf("Change should include correct filename, %+v", dChanges[0]) + } + if dChanges[0].Destination != "" { + t.Fatalf("Change destination should be empty, %+v", dChanges[0]) + } +} + +// TestGetUnitProperties reads the `-.mount` which should exist on all systemd +// systems and ensures that one of its properties is valid. +func TestGetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "-.mount" + + info, err := conn.GetUnitProperties(unit) + if err != nil { + t.Fatal(err) + } + + names := info["Wants"].([]string) + + if len(names) < 1 { + t.Fatal("/ is unwanted") + } + + if names[0] != "system.slice" { + t.Fatal("unexpected wants for /") + } + + prop, err := conn.GetUnitProperty(unit, "Wants") + if err != nil { + t.Fatal(err) + } + + if prop.Name != "Wants" { + t.Fatal("unexpected property name") + } + + val := prop.Value.Value().([]string) + if !reflect.DeepEqual(val, names) { + t.Fatal("unexpected property value") + } +} + +// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a +// unit with an invalid name. This test should be run with --test.timeout set, +// as a fail will manifest as GetUnitProperties hanging indefinitely. +func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) { + conn := setupConn(t) + + unit := "//invalid#$^/" + + _, err := conn.GetUnitProperties(unit) + if err == nil { + t.Fatal("Expected an error, got nil") + } + + _, err = conn.GetUnitProperty(unit, "Wants") + if err == nil { + t.Fatal("Expected an error, got nil") + } +} + +// TestSetUnitProperties changes a cgroup setting on the `tmp.mount` +// which should exist on all systemd systems and ensures that the +// property was set. +func TestSetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "tmp.mount" + + if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil { + t.Fatal(err) + } + + info, err := conn.GetUnitTypeProperties(unit, "Mount") + if err != nil { + t.Fatal(err) + } + + value := info["CPUShares"].(uint64) + if value != 1023 { + t.Fatal("CPUShares of unit is not 1023:", value) + } +} + +// Ensure that basic transient unit starting and stopping works. 
+func TestStartStopTransientUnit(t *testing.T) { + conn := setupConn(t) + + props := []Property{ + PropExecStart([]string{"/bin/sleep", "400"}, false), + } + target := fmt.Sprintf("testing-transient-%d.service", rand.Int()) + + // Start the unit + job, err := conn.StartTransientUnit(target, "replace", props...) + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done:", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +func TestConnJobListener(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + jobSize := len(conn.jobListener.jobs) + + _, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + _, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + currentJobSize := len(conn.jobListener.jobs) + if jobSize != currentJobSize { + t.Fatal("JobListener jobs leaked") + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 00000000000..a06ccda761d --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,220 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. 
+ +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 00000000000..45ad1fb3991 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,33 @@ +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() (int) { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() (*set) { + return &set{make(map[string] bool)} +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go new file mode 100644 index 00000000000..c4435f8800c --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go @@ -0,0 +1,39 @@ +package dbus + +import ( + "testing" +) + +// TestBasicSetActions asserts that Add & Remove behavior is correct +func TestBasicSetActions(t *testing.T) { + s := newSet() + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } + + s.Add("foo") + + if !s.Contains("foo") { + t.Fatal("set should contain 'foo'") + } + + v := s.Values() + if len(v) != 1 { + t.Fatal("set.Values did not report correct number of values") + } + if v[0] != "foo" { + t.Fatal("set.Values did not report value") + } + + s.Remove("foo") + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } + + v = s.Values() + if len(v) != 0 { + t.Fatal("set.Values did not report correct number of values") + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 00000000000..fcd29b6e8fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,251 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). 
+func (c *Conn) Subscribe() error { + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) initSubscription() { + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) +} + +func (c *Conn) initDispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sysconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + c.jobComplete(signal) + + unitName := signal.Body[2].(string) + var unitPath dbus.ObjectPath + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + if unitPath != dbus.ObjectPath("") { + c.sendSubStateUpdate(unitPath) + } + case "org.freedesktop.systemd1.Manager.UnitNew": + c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath)) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + // we only care about SubState updates, which are a Unit property + c.sendSubStateUpdate(signal.Path) + } + } + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. 
+// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + c.subscriber.updateCh = updateCh + c.subscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + if c.subscriber.updateCh == nil { + return + } + + if c.shouldIgnore(path) { + return + } + + info, err := c.GetUnitProperties(string(path)) + if err != nil { + select { + case c.subscriber.errCh <- err: + default: + } + } + + name := info["Id"].(string) + substate := info["SubState"].(string) + + update := &SubStateUpdate{name, substate} + select { + case c.subscriber.updateCh <- update: + default: + select { + case c.subscriber.errCh <- errors.New("update channel full!"): + default: + } + } + + c.updateIgnore(path, info) +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). 
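+
+// Editorial usage sketch (not part of the upstream go-systemd source): one
+// way a caller could consume updates from SetSubStateSubscriber. The buffer
+// sizes and the logging below are illustrative assumptions only; the package
+// itself does not prescribe them.
+//
+//	conn, err := New()
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := conn.Subscribe(); err != nil {
+//		// handle error
+//	}
+//
+//	// Buffer the channels: sendSubStateUpdate uses non-blocking writes, so a
+//	// full (or unbuffered, unread) channel means dropped updates, reported
+//	// on errCh when possible.
+//	updates := make(chan *SubStateUpdate, 256)
+//	errs := make(chan error, 16)
+//	conn.SetSubStateSubscriber(updates, errs)
+//
+//	go func() {
+//		for {
+//			select {
+//			case u := <-updates:
+//				fmt.Printf("%s is now %s\n", u.UnitName, u.SubState)
+//			case e := <-errs:
+//				fmt.Println("subscription error:", e)
+//			}
+//		}
+//	}()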
+ +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + c.cleanIgnore() + + // unit is unloaded - it will trigger bad systemd dbus behavior + if info["LoadState"].(string) == "not-found" { + c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subscriber.cleanIgnore < now { + c.subscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subscriber.ignore { + if t < now { + delete(c.subscriber.ignore, p) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 00000000000..296e6ddaff6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,43 @@ +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go new file mode 100644 index 00000000000..4ecd15376d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go @@ -0,0 +1,66 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. 
+func TestSubscriptionSetUnit(t *testing.T) { + target := "subscribe-events-set.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + subSet := conn.NewSubscriptionSet() + evChan, errChan := subSet.Subscribe() + + subSet.Add(target) + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + if !ok { + t.Fatal("Unexpected event:", changes) + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go new file mode 100644 index 00000000000..f2b5dfc28c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go @@ -0,0 +1,91 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribe exercises the basics of subscription +func TestSubscribe(t *testing.T) { + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } +} + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. +func TestSubscribeUnit(t *testing.T) { + target := "subscribe-events.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } + + evChan, errChan := conn.SubscribeUnits(time.Second) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + // Just continue until we see our event. 
+ if !ok { + continue + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} + + diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS new file mode 100644 index 00000000000..1e998f8ac1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 00000000000..17dbd7a64cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,69 @@ +package mount + +import ( + "strings" +) + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + flags := map[string]struct { + clear bool + flag int + }{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, + } + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..a59b58960bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,35 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + RDONLY = C.MNT_RDONLY + NOSUID = C.MNT_NOSUID + NOEXEC = C.MNT_NOEXEC + SYNCHRONOUS = C.MNT_SYNCHRONOUS + NOATIME = C.MNT_NOATIME + + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 
index 00000000000..9986621c8f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,30 @@ +package mount + +import ( + "syscall" +) + +const ( + RDONLY = syscall.MS_RDONLY + NOSUID = syscall.MS_NOSUID + NODEV = syscall.MS_NODEV + NOEXEC = syscall.MS_NOEXEC + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + DIRSYNC = syscall.MS_DIRSYNC + REMOUNT = syscall.MS_REMOUNT + MANDLOCK = syscall.MS_MANDLOCK + NOATIME = syscall.MS_NOATIME + NODIRATIME = syscall.MS_NODIRATIME + BIND = syscall.MS_BIND + RBIND = syscall.MS_BIND | syscall.MS_REC + UNBINDABLE = syscall.MS_UNBINDABLE + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + PRIVATE = syscall.MS_PRIVATE + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + SLAVE = syscall.MS_SLAVE + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + SHARED = syscall.MS_SHARED + RSHARED = syscall.MS_SHARED | syscall.MS_REC + RELATIME = syscall.MS_RELATIME + STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000000..c4f82176b81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,29 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 00000000000..5ca731601f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,70 @@ +package mount + +import ( + "time" +) + +func GetMounts() ([]*MountInfo, error) { + return parseMountTable() +} + +// Looks at /proc/self/mountinfo to determine of the specified +// mountpoint has been mounted +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount the specified options at the target path only if +// the target is not mounted +// Options must be specified as fstab style +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// Mount the specified options at the target path +// reguardless if the target is mounted or not +// Options must be specified as fstab style +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount the target only if it is mounted +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// Unmount the target reguardless if it is mounted or not +func ForceUnmount(target 
string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go new file mode 100644 index 00000000000..5c7f1b86a0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go @@ -0,0 +1,137 @@ +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10 got %s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 00000000000..bb870e6f59b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import 
"C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 00000000000..dd4280c7778 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000000..eb93365eb74 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 00000000000..ec8e8bca2a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,7 @@ +package mount + +type MountInfo struct { + Id, Parent, Major, Minor int + Root, Mountpoint, Opts, Optional string + Fstype, Source, VfsOpts string +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 00000000000..2fe91862d84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,40 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse 
/proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*MountInfo + for _, entry := range entries { + var mountinfo MountInfo + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000000..0eb018e231a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,92 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*MountInfo, error) { + var ( + s = bufio.NewScanner(r) + out = []*MountInfo{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &MountInfo{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.Id, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific Pid +func PidMountInfo(pid int) ([]*MountInfo, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 00000000000..e92b7e2c741 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,477 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 
rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 
/dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / 
/var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / 
/var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / 
/var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / 
/var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / 
/var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / 
/var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / 
/var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / 
/var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / 
/var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / 
/var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / 
/var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / 
/var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / 
/var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / 
/var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / 
/var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := MountInfo{ + Id: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: 
"shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000000..352336b9a34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*MountInfo, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000000..cd9b86cefac --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,54 @@ +// +build linux + +package mount + +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + mounted, err = Mounted(mountPoint) + if err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 00000000000..0986bd9c75d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propogated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } 
+ if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared
source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. 
It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000000..9e4bd4dbee9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000000..ac74d8f0496 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/MAINTAINERS new file mode 100644 index 00000000000..51a41a5b605 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/MAINTAINERS @@ -0,0 +1,3 @@ +Tibor Vass (@tiborvass) +Cristian Staretu (@unclejack) +Tianon Gravi (@tianon) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md new file mode 100644 index 00000000000..0d1dbb70e64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md @@ -0,0 +1,5 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. 
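To make the scoped symlink evaluation described in the README above concrete, here is a minimal, illustrative sketch of how a consumer might call the FollowSymlinkInScope function added in fs.go below; the /tmp paths and the symlink layout are hypothetical and only serve as an example.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/docker/docker/pkg/symlink"
    )

    func main() {
    	// Hypothetical layout: /tmp/scope/link is a symlink pointing at /etc.
    	// FollowSymlinkInScope resolves the link but keeps the result inside
    	// the scope, so this prints /tmp/scope/etc rather than /etc.
    	resolved, err := symlink.FollowSymlinkInScope("/tmp/scope/link", "/tmp/scope")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resolved)
    }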
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go new file mode 100644 index 00000000000..b4bdff24dd3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go @@ -0,0 +1,131 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + root, err = filepath.Abs(root) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to subsequently create additional symlinks that could potentially make a +// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if cleanP == string(filepath.Separator) { + // never Lstat "/" itself + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p + string(filepath.Separator)) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." 
for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go new file mode 100644 index 00000000000..89209484a38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go @@ -0,0 +1,402 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) 
+ } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + 
t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", 
"root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/.drone.yml b/Godeps/_workspace/src/github.com/docker/libcontainer/.drone.yml new file mode 100644 index 00000000000..80d298f2186 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/.drone.yml @@ -0,0 +1,9 @@ +image: dockercore/libcontainer +script: +# Setup the DockerInDocker environment. + - /dind + - sed -i 's!docker/docker!docker/libcontainer!' /go/src/github.com/docker/docker/hack/make/.validate + - bash /go/src/github.com/docker/docker/hack/make/validate-dco + - bash /go/src/github.com/docker/docker/hack/make/validate-gofmt + - export GOPATH="$GOPATH:/go:$(pwd)/vendor" # Drone mucks with our GOPATH + - make direct-test diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTING.md new file mode 100644 index 00000000000..667cc5a63fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/CONTRIBUTING.md @@ -0,0 +1,257 @@ +# The libcontainer Contributors' Guide + +Want to hack on libcontainer? Awesome! Here are instructions to get you +started. They are probably not perfect, please let us know if anything +feels wrong or incomplete. + +## Reporting Issues + +When reporting [issues](https://github.com/docker/libcontainer/issues) +on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), +the output of `uname -a`. Please include the steps required to reproduce +the problem if possible and applicable. +This information will help us review and fix your issue faster. + +## Development Environment + +### Requirements + +For best results, use a Linux development environment. +The following packages are required to compile libcontainer natively. + +- Golang 1.3 +- GCC +- git +- cgutils + +You can develop on OSX, but you are limited to Dockerfile-based builds only. + +### Building libcontainer from Dockerfile + + make all + +This is the easiest way of building libcontainer. +As this build is done using Docker, you can even run this from [OSX](https://github.com/boot2docker/boot2docker) + +### Testing changes with "nsinit" + + make sh + +This will create an container that runs `nsinit exec sh` on a busybox rootfs with the configuration from ['minimal.json'](https://github.com/docker/libcontainer/blob/master/sample_configs/minimal.json). +Like the previous command, you can run this on OSX too! 
+ +### Building libcontainer directly + +> Note: You should add the `vendor` directory to your GOPATH to use the vendored libraries + + ./update-vendor.sh + go get -d ./... + make direct-build + # Run the tests + make direct-test-short | egrep --color 'FAIL|$' + # Run all the tests + make direct-test | egrep --color 'FAIL|$' + +### Testing Changes with "nsinit" directly + +To test a change: + + # Install nsinit + make direct-install + + # Optional, add a docker0 bridge + ip link add docker0 type bridge + ifconfig docker0 172.17.0.1/16 up + + mkdir testfs + curl -sSL https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar | tar -xC testfs + cd testfs + cp container.json + nsinit exec sh + +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep libcontainer lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* libcontainer. + +### Discuss your design on the mailing list + +We recommend discussing your plans [on the mailing +list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer) +before starting to code - especially for more ambitious contributions. +This gives other contributors a chance to point you in the right +direction, give feedback on your design, and maybe point out if someone +else is working on the same thing. + +### Create issues... + +Any significant improvement should be documented as [a GitHub +issue](https://github.com/docker/libcontainer/issues) before anybody +starts working on it. + +### ...but check for existing issues first! + +Please take a moment to check that an issue doesn't already exist +documenting your bug report or improvement proposal. If it does, it +never hurts to add a quick "+1" or "I have this problem too". This will +help prioritize the most common problems and requests. + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. See ``docs/README.md`` for more +information on building the docs and how docs get released. + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plugins that do this automatically. + +Pull request descriptions should be as clear as possible and include a +reference to all the issues that they address.
+ +Pull requests must not contain commits from other users or branches. + +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Be +sure to post a comment after pushing. The new commits will show up in the pull +request automatically, but the reviewers will not be notified unless you +comment. + +Before the pull request is merged, make sure that you squash your commits into +logical units of work using `git rebase -i` and `git push -f`. After every +commit the test suite should be passing. Include documentation changes in the +same commit so that a revert would remove all traces of the feature or fix. + +Commits that fix or close an issue should include a reference like `Closes #XXX` +or `Fixes #XXX`, which will automatically close the issue when merged. + +### Testing + +Make sure you include suitable tests, preferably unit tests, in your pull request +and that all the tests pass. + +*Instructions for running tests to be added.* + +### Merge approval + +libcontainer maintainers use LGTM (looks good to me) in comments on the code review +to indicate acceptance. + +A change requires LGTMs from at least two maintainers. One of those must come from +a maintainer of the component affected. For example, if a change affects `netlink/` +and `security`, it needs at least one LGTM from a maintainer of each. Maintainers +only need one LGTM as presumably they LGTM their own change. + +For more details see [MAINTAINERS.md](MAINTAINERS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved.
+``` + +then you just add a line to every git commit message: + + Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle) + +using your real name (sorry, no pseudonyms or anonymous contributions). + +One way to automate this is to customise your git ``commit.template`` by adding +a ``prepare-commit-msg`` hook to your libcontainer checkout: + +``` +curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg +``` + +* Note: the above script expects to find your GitHub user name in ``git config --get github.user`` + +#### Small patch exception + +There are several exceptions to the signing requirement. Currently these are: + +* Your patch fixes spelling or grammar errors. +* Your patch is a single line change to documentation contained in the + `docs` directory. +* Your patch fixes Markdown formatting or syntax errors in the + documentation contained in the `docs` directory. + +If you have any questions, please refer to the FAQ in the [docs](to be written) + +### How can I become a maintainer? + +* Step 1: learn the component inside out +* Step 2: make yourself useful by contributing code, bugfixes, support etc. +* Step 3: volunteer on the IRC channel (#libcontainer@freenode) + +Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. +You don't have to be a maintainer to make a difference on the project! + diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile b/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile new file mode 100644 index 00000000000..0771c808ea9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.4 + +RUN go get golang.org/x/tools/cmd/cover + +ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor +RUN go get github.com/docker/docker/pkg/term + +# setup a playground for us to spawn containers in +RUN mkdir /busybox && \ + curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox + +RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/project/dind -o /dind && \ + chmod +x /dind + +COPY . /go/src/github.com/docker/libcontainer +WORKDIR /go/src/github.com/docker/libcontainer +RUN cp sample_configs/minimal.json /busybox/container.json + +RUN go get -d -v ./... +RUN make direct-install + +ENTRYPOINT ["/dind"] +CMD ["make", "direct-test"] diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE b/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE new file mode 100644 index 00000000000..27448585ad4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS new file mode 100644 index 00000000000..5235131722b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS @@ -0,0 +1,6 @@ +Michael Crosby (@crosbymichael) +Rohit Jnagal (@rjnagal) +Victor Marmol (@vmarmol) +Mrunal Patel (@mrunalp) +Alexandr Morozov (@LK4D4) +update-vendor.sh: Tianon Gravi (@tianon) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md new file mode 100644 index 00000000000..2ac9ca21f4e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md @@ -0,0 +1,99 @@ +# The libcontainer Maintainers' Guide + +## Introduction + +Dear maintainer. Thank you for investing the time and energy to help +make libcontainer as useful as possible. Maintaining a project is difficult, +sometimes unrewarding work. Sure, you will get to contribute cool +features to the project. But most of your time will be spent reviewing, +cleaning up, documenting, answering questions, justifying design +decisions - while everyone has all the fun! But remember - the quality +of the maintainers work is what distinguishes the good projects from the +great. So please be proud of your work, even the unglamourous parts, +and encourage a culture of appreciation and respect for *every* aspect +of improving the project - not just the hot new features. + +This document is a manual for maintainers old and new. It explains what +is expected of maintainers, how they should work, and what tools are +available to them. + +This is a living document - if you see something out of date or missing, +speak up! + +## What are a maintainer's responsibility? + +It is every maintainer's responsibility to: + +* 1) Expose a clear roadmap for improving their component. +* 2) Deliver prompt feedback and decisions on pull requests. +* 3) Be available to anyone with questions, bug reports, criticism etc. + on their component. This includes IRC, GitHub requests and the mailing + list. +* 4) Make sure their component respects the philosophy, design and + roadmap of the project. + +## How are decisions made? + +Short answer: with pull requests to the libcontainer repository. + +libcontainer is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, roadmap and APIs. *If it's +part of the project, it's in the repo. It's in the repo, it's part of +the project.* + +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto. And so on. + +All decisions affecting libcontainer, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do +this (see below "Who decides what?") + + +## Who decides what? + +All decisions are pull requests, and the relevant maintainers make +decisions by accepting or refusing the pull request. Review and acceptance +by anyone is denoted by adding a comment in the pull request: `LGTM`. 
+However, only currently listed `MAINTAINERS` are counted towards the required +two LGTMs. + +libcontainer follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL. +This means that all decisions are made by default by Michael. Since making +every decision himself would be highly un-scalable, in practice decisions +are spread across multiple maintainers. + +The relevant maintainers for a pull request can be worked out in two steps: + +* Step 1: Determine the subdirectories affected by the pull request. This + might be `netlink/` and `security/`, or any other part of the repo. + +* Step 2: Find the `MAINTAINERS` file which affects this directory. If the + directory itself does not have a `MAINTAINERS` file, work your way up + the repo hierarchy until you find one. + +### I'm a maintainer, and I'm going on holiday + +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. + +### I'm a maintainer, should I make pull requests too? + +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. + +### Who assigns maintainers? + +Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files. + +### How is this process changed? + +Just like everything else: by making a pull request :) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile b/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile new file mode 100644 index 00000000000..f94171b0fcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/Makefile @@ -0,0 +1,24 @@ + +all: + docker build -t dockercore/libcontainer . + +test: + # we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting + docker run --rm -it --privileged dockercore/libcontainer + +sh: + docker run --rm -it --privileged -w /busybox dockercore/libcontainer nsinit exec sh + +GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) + +direct-test: + go test $(TEST_TAGS) -cover -v $(GO_PACKAGES) + +direct-test-short: + go test $(TEST_TAGS) -cover -test.short -v $(GO_PACKAGES) + +direct-build: + go build -v $(GO_PACKAGES) + +direct-install: + go install -v $(GO_PACKAGES) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE b/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE new file mode 100644 index 00000000000..ca1635f8964 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/NOTICE @@ -0,0 +1,16 @@ +libcontainer +Copyright 2012-2014 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md b/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md new file mode 100644 index 00000000000..42396c0eecd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/PRINCIPLES.md @@ -0,0 +1,19 @@ +# libcontainer Principles + +In the design and development of libcontainer we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between two options, choose the one that is easier to reverse. +* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The fewer moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/README.md new file mode 100644 index 00000000000..37047e68c86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/README.md @@ -0,0 +1,62 @@ +## libcontainer - reference implementation for containers [![Build Status](https://ci.dockerproject.com/github.com/docker/libcontainer/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/libcontainer) + +### Note on API changes: + +Please bear with us while we work on making the libcontainer API stable and something that we can support long term. We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API. + +#### Background + +libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. + + +#### Container +A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system. + +libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container. + + +#### Root file system + +A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree. + + +#### Configuration + +A container is initially configured by supplying configuration data when the container is created. + + +#### nsinit + +`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory. 
+ +To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container. + +See the `sample_configs` folder for examples of what the container configuration should look like. + +To execute `/bin/bash` in the current directory as a container just run the following **as root**: +```bash +nsinit exec /bin/bash +``` + +If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed. + +You can identify if a process is running in a container by looking to see if `state.json` is in the root of the directory. + +You may also specify an alternate root place where the `container.json` file is read and where the `state.json` file will be saved. + +#### Future +See the [roadmap](ROADMAP.md). + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. + +## Hacking on libcontainer + +First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md). + +If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTING.md). + +If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and +"How can I become a maintainer?" in the Contributors' Guide. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md new file mode 100644 index 00000000000..08deb9adaf8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/ROADMAP.md @@ -0,0 +1,16 @@ +# libcontainer: what's next? + +This document is a high-level overview of where we want to take libcontainer next. +It is a curated selection of planned improvements which are either important, difficult, or both. + +For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues). + +To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. + +## Broader kernel support + +Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel. + +## Cross-architecture support + +Our goal is to make libcontainer run everywhere. However currently libcontainer only runs on x86_64 systems. We plan on expanding architecture support, so that libcontainer containers can be created and used on more architectures. 
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/SPEC.md b/Godeps/_workspace/src/github.com/docker/libcontainer/SPEC.md new file mode 100644 index 00000000000..d83d758dddb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/SPEC.md @@ -0,0 +1,346 @@ +## Container Specification - v1 + +This is the standard configuration for version 1 containers. It includes +namespaces, standard filesystem setup, a default Linux capability set, and +information about resource reservations. It also has information about any +populated environment settings for the processes running inside a container. + +Along with the configuration of how a container is created, the standard also +discusses actions that can be performed on a container to manage and inspect +information about the processes running inside. + +The v1 profile is meant to be able to accommodate the majority of applications +with a strong security configuration. + +### System Requirements and Compatibility + +Minimum requirements: +* Kernel version - 3.8 recommended; 2.6.2x minimum (with backported patches) +* Mounted cgroups with each subsystem in its own hierarchy + + +### Namespaces + +| Flag | Enabled | +| ------------ | ------- | +| CLONE_NEWPID | 1 | +| CLONE_NEWUTS | 1 | +| CLONE_NEWIPC | 1 | +| CLONE_NEWNET | 1 | +| CLONE_NEWNS | 1 | +| CLONE_NEWUSER | 0 | + +In v1 the user namespace is not enabled by default for support of older kernels +where the user namespace feature is not fully implemented. Namespaces are +created for the container via the `clone` syscall. + + +### Filesystem + +A root filesystem must be provided to a container for execution. The container +will use this root filesystem (rootfs) to jail and spawn processes inside where +the binaries and system libraries are local to that directory. Any binaries +to be executed must be contained within this rootfs. + +Mounts that happen inside the container are automatically cleaned up when the +container exits as the mount namespace is destroyed and the kernel will +unmount all the mounts that were setup within that namespace. + +For a container to execute properly, there are certain filesystems that +are required to be mounted within the rootfs that the runtime will setup. + +| Path | Type | Flags | Data | +| ----------- | ------ | -------------------------------------- | --------------------------------------- | +| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 | +| /dev/shm | shm | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k | +| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 | +| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | | + + +After a container's filesystems are mounted within the newly created +mount namespace `/dev` will need to be populated with a set of device nodes. +It is expected that a rootfs does not need to have any device nodes specified +for `/dev` within the rootfs as the container will setup the correct devices +that are required for executing a container's process. + +| Path | Mode | Access | +| ------------ | ---- | ---------- | +| /dev/null | 0666 | rwm | +| /dev/zero | 0666 | rwm | +| /dev/full | 0666 | rwm | +| /dev/tty | 0666 | rwm | +| /dev/random | 0666 | rwm | +| /dev/urandom | 0666 | rwm | +| /dev/fuse | 0666 | rwm | + + +**ptmx** +`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within +the container.
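+
+As an editorial illustration (not part of the upstream specification), a minimal
+Go sketch of how a runtime could create the device nodes listed above inside a
+rootfs with `mknod(2)` might look as follows. The `rootfs` path is an assumed
+example value, and the `node` type and `mkdev` helper are invented for the sketch:
+
+```go
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"syscall"
+)
+
+// node describes one character device from the table above.
+type node struct {
+	path         string
+	major, minor uint32
+}
+
+// mkdev encodes a device number; this simple form is only valid for the
+// small major/minor numbers used here.
+func mkdev(major, minor uint32) int {
+	return int(major<<8 | minor)
+}
+
+func main() {
+	rootfs := "/var/lib/mycontainer/rootfs" // example path, not mandated by the spec
+
+	devices := []node{
+		{"/dev/null", 1, 3},
+		{"/dev/zero", 1, 5},
+		{"/dev/full", 1, 7},
+		{"/dev/tty", 5, 0},
+		{"/dev/random", 1, 8},
+		{"/dev/urandom", 1, 9},
+		{"/dev/fuse", 10, 229},
+	}
+	for _, d := range devices {
+		dest := filepath.Join(rootfs, d.path)
+		// 0666 matches the Mode column; S_IFCHR marks a character device.
+		if err := syscall.Mknod(dest, syscall.S_IFCHR|0666, mkdev(d.major, d.minor)); err != nil {
+			fmt.Println("mknod", dest, err)
+		}
+	}
+}
+```
+
+Creating these nodes requires CAP_MKNOD, which is part of the default capability
+set listed in the Security section below.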
+ +The use of a pseudo TTY is optional within a container and it should support both. +If a pseudo is provided to the container `/dev/console` will need to be +setup by binding the console in `/dev/` after it has been populated and mounted +in tmpfs. + +| Source | Destination | UID GID | Mode | Type | +| --------------- | ------------ | ------- | ---- | ---- | +| *pty host path* | /dev/console | 0 0 | 0600 | bind | + + +After `/dev/null` has been setup we check for any external links between +the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing +to `/dev/null` outside the container we close and `dup2` the the `/dev/null` +that is local to the container's rootfs. + + +After the container has `/proc` mounted a few standard symlinks are setup +within `/dev/` for the io. + +| Source | Destination | +| ------------ | ----------- | +| /proc/1/fd | /dev/fd | +| /proc/1/fd/0 | /dev/stdin | +| /proc/1/fd/1 | /dev/stdout | +| /proc/1/fd/2 | /dev/stderr | + +A `pivot_root` is used to change the root for the process, effectively +jailing the process inside the rootfs. + +```c +put_old = mkdir(...); +pivot_root(rootfs, put_old); +chdir("/"); +unmount(put_old, MS_DETACH); +rmdir(put_old); +``` + +For container's running with a rootfs inside `ramfs` a `MS_MOVE` combined +with a `chroot` is required as `pivot_root` is not supported in `ramfs`. + +```c +mount(rootfs, "/", NULL, MS_MOVE, NULL); +chroot("."); +chdir("/"); +``` + +The `umask` is set back to `0022` after the filesystem setup has been completed. + +### Resources + +Cgroups are used to handle resource allocation for containers. This includes +system resources like cpu, memory, and device access. + +| Subsystem | Enabled | +| ---------- | ------- | +| devices | 1 | +| memory | 1 | +| cpu | 1 | +| cpuacct | 1 | +| cpuset | 1 | +| blkio | 1 | +| perf_event | 1 | +| freezer | 1 | + + +All cgroup subsystem are joined so that statistics can be collected from +each of the subsystems. Freezer does not expose any stats but is joined +so that containers can be paused and resumed. + +The parent process of the container's init must place the init pid inside +the correct cgroups before the initialization begins. This is done so +that no processes or threads escape the cgroups. This sync is +done via a pipe ( specified in the runtime section below ) that the container's +init process will block waiting for the parent to finish setup. + +### Security + +The standard set of Linux capabilities that are set in a container +provide a good default for security and flexibility for the applications. 
+ + +| Capability | Enabled | +| -------------------- | ------- | +| CAP_NET_RAW | 1 | +| CAP_NET_BIND_SERVICE | 1 | +| CAP_AUDIT_WRITE | 1 | +| CAP_DAC_OVERRIDE | 1 | +| CAP_SETFCAP | 1 | +| CAP_SETPCAP | 1 | +| CAP_SETGID | 1 | +| CAP_SETUID | 1 | +| CAP_MKNOD | 1 | +| CAP_CHOWN | 1 | +| CAP_FOWNER | 1 | +| CAP_FSETID | 1 | +| CAP_KILL | 1 | +| CAP_SYS_CHROOT | 1 | +| CAP_NET_BROADCAST | 0 | +| CAP_SYS_MODULE | 0 | +| CAP_SYS_RAWIO | 0 | +| CAP_SYS_PACCT | 0 | +| CAP_SYS_ADMIN | 0 | +| CAP_SYS_NICE | 0 | +| CAP_SYS_RESOURCE | 0 | +| CAP_SYS_TIME | 0 | +| CAP_SYS_TTY_CONFIG | 0 | +| CAP_AUDIT_CONTROL | 0 | +| CAP_MAC_OVERRIDE | 0 | +| CAP_MAC_ADMIN | 0 | +| CAP_NET_ADMIN | 0 | +| CAP_SYSLOG | 0 | +| CAP_DAC_READ_SEARCH | 0 | +| CAP_LINUX_IMMUTABLE | 0 | +| CAP_IPC_LOCK | 0 | +| CAP_IPC_OWNER | 0 | +| CAP_SYS_PTRACE | 0 | +| CAP_SYS_BOOT | 0 | +| CAP_LEASE | 0 | +| CAP_WAKE_ALARM | 0 | +| CAP_BLOCK_SUSPEND | 0 | + + +Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor) +and [selinux](http://selinuxproject.org/page/Main_Page) can be used with +the containers. A container should support setting an apparmor profile or +selinux process and mount labels if provided in the configuration. + +Standard apparmor profile: +```c +#include <tunables/global> +profile flags=(attach_disconnected,mediate_deleted) { + #include <abstractions/base> + network, + capability, + file, + umount, + + mount fstype=tmpfs, + mount fstype=mqueue, + mount fstype=fuse.*, + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + + deny @{PROC}/sys/fs/** wklx, + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, + deny @{PROC}/sys/kernel/*/** wklx, + + deny mount options=(ro, remount) -> /, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, + deny mount fstype=devpts, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, +} +``` + +*TODO: seccomp work is being done to find a good default config* + +### Runtime and Init Process + +During container creation, the parent process needs to talk to the container's init +process and have a form of synchronization. This is accomplished by creating +a pipe that is passed to the container's init. When the init process first spawns, +it will block on its side of the pipe until the parent closes its side. This +allows the parent to have time to set the new process inside a cgroup hierarchy +and/or write any uid/gid mappings required for user namespaces. +The pipe is passed to the init process via FD 3. + +The application consuming libcontainer should be compiled statically. libcontainer +does not define any init process and the arguments provided are used to `exec` the +process inside the application. There should be no long-running init within the +container spec. + +If a pseudo TTY is provided to a container, it will open and `dup2` the console +as the container's STDIN, STDOUT, STDERR as well as mounting the console +as `/dev/console`. + +An extra set of mounts is provided to a container and setup for use.
+
+An extra set of mounts is provided to a container and set up for use. A container's
+rootfs can contain some non-portable files that can cause side effects during
+execution of a process. These files are usually created and populated with the
+container-specific information via the runtime.
+
+**Extra runtime files:**
+* /etc/hosts
+* /etc/resolv.conf
+* /etc/hostname
+* /etc/localtime
+
+
+#### Defaults
+
+There are a few defaults that can be overridden by users; in their absence, the
+following apply to processes within a container.
+
+| Type                | Value                          |
+| ------------------- | ------------------------------ |
+| Parent Death Signal | SIGKILL                        |
+| UID                 | 0                              |
+| GID                 | 0                              |
+| GROUPS              | 0, NULL                        |
+| CWD                 | "/"                            |
+| $HOME               | Current user's home dir or "/" |
+| Readonly rootfs     | false                          |
+| Pseudo TTY          | false                          |
+
+
+## Actions
+
+After a container is created there is a standard set of actions that can
+be performed on the container. These actions are part of the public API for
+a container.
+
+| Action         | Description                                                          |
+| -------------- | --------------------------------------------------------------------- |
+| Get processes  | Return all the pids for processes running inside a container          |
+| Get Stats      | Return resource statistics for the container as a whole               |
+| Wait           | Wait on the container's init process (pid 1)                          |
+| Wait Process   | Wait on any of the container's processes, returning the exit status   |
+| Destroy        | Kill the container's init process and remove any filesystem state     |
+| Signal         | Send a signal to the container's init process                         |
+| Signal Process | Send a signal to any of the container's processes                     |
+| Pause          | Pause all processes inside the container                              |
+| Resume         | Resume all processes inside the container if paused                   |
+| Exec           | Execute a new process inside of the container (requires setns)        |
+
+### Execute a new process inside of a running container
+
+A user can execute a new process inside of a running container. Any binaries to be
+executed must be accessible within the container's rootfs.
+
+The started process will run inside the container's rootfs. Any changes
+made by the process to the container's filesystem will persist after the
+process finishes executing.
+
+The started process will join all of the container's existing namespaces. When the
+container is paused, the process will also be paused, and it will resume when
+the container is unpaused. The started process will only run while the container's
+primary process (PID 1) is running, and will not be restarted when the container
+is restarted.
+
+#### Planned additions
+
+The started process will have its own cgroups nested inside the container's
+cgroups. This is used for process tracking and, optionally, resource allocation
+handling for the new process. The freezer cgroup is required; the rest of the
+cgroups are optional. The process executor must place its pid inside the correct
+cgroups before starting the process. This is done so that no child processes or
+threads can escape the cgroups.
+
+When the process is stopped, the process executor will make a best-effort attempt
+to stop all its children and remove the sub-cgroups.
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go b/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go
new file mode 100644
index 00000000000..5c682ee3441
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libcontainer/api_temp.go
@@ -0,0 +1,21 @@
+/*
+Temporary API endpoint for libcontainer while the full API is finalized (api.go).
+*/ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/network" +) + +// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that. +// DEPRECATED: The below portions are only to be used during the transition to the official API. +// Returns all available stats for the given container. +func GetStats(container *Config, state *State) (stats *ContainerStats, err error) { + stats = &ContainerStats{} + if stats.CgroupStats, err = fs.GetStats(state.CgroupPaths); err != nil { + return stats, err + } + stats.NetworkStats, err = network.GetStats(&state.NetworkState) + return stats, err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go new file mode 100644 index 00000000000..fb1574dfc6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor.go @@ -0,0 +1,35 @@ +// +build apparmor,linux + +package apparmor + +// #cgo LDFLAGS: -lapparmor +// #include +// #include +import "C" +import ( + "io/ioutil" + "os" + "unsafe" +) + +func IsEnabled() bool { + if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { + buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + return err == nil && len(buf) > 1 && buf[0] == 'Y' + } + return false +} + +func ApplyProfile(name string) error { + if name == "" { + return nil + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + if _, err := C.aa_change_onexec(cName); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go new file mode 100644 index 00000000000..937bf915c75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go @@ -0,0 +1,11 @@ +// +build !apparmor !linux + +package apparmor + +func IsEnabled() bool { + return false +} + +func ApplyProfile(name string) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go new file mode 100644 index 00000000000..825e646d920 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/gen.go @@ -0,0 +1,94 @@ +package apparmor + +import ( + "io" + "os" + "text/template" +) + +type data struct { + Name string + Imports []string + InnerImports []string +} + +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + mount fstype=tmpfs, + mount fstype=mqueue, + mount fstype=fuse.*, + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + + deny @{PROC}/sys/fs/** wklx, + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, + deny @{PROC}/sys/kernel/*/** wklx, + + deny mount options=(ro, remount) -> /, + deny mount fstype=debugfs -> 
/var/lib/ureadahead/debugfs/, + deny mount fstype=devpts, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, +} +` + +func generateProfile(out io.Writer) error { + compiled, err := template.New("apparmor_profile").Parse(baseTemplate) + if err != nil { + return err + } + data := &data{ + Name: "docker-default", + } + if tuntablesExists() { + data.Imports = append(data.Imports, "#include ") + } else { + data.Imports = append(data.Imports, "@{PROC}=/proc/") + } + if abstrctionsEsists() { + data.InnerImports = append(data.InnerImports, "#include ") + } + if err := compiled.Execute(out, data); err != nil { + return err + } + return nil +} + +// check if the tunables/global exist +func tuntablesExists() bool { + _, err := os.Stat("/etc/apparmor.d/tunables/global") + return err == nil +} + +// check if abstractions/base exist +func abstrctionsEsists() bool { + _, err := os.Stat("/etc/apparmor.d/abstractions/base") + return err == nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go new file mode 100644 index 00000000000..8ed5437470e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/apparmor/setup.go @@ -0,0 +1,44 @@ +package apparmor + +import ( + "fmt" + "os" + "os/exec" + "path" +) + +const ( + DefaultProfilePath = "/etc/apparmor.d/docker" +) + +func InstallDefaultProfile() error { + if !IsEnabled() { + return nil + } + + // Make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { + return err + } + + f, err := os.OpenFile(DefaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := generateProfile(f); err != nil { + f.Close() + return err + } + f.Close() + + cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker") + // to use the parser directly we have to make sure we are in the correct + // dir with the profile + cmd.Dir = "/etc/apparmor.d" + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go new file mode 100644 index 00000000000..106698d18fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go @@ -0,0 +1,56 @@ +package cgroups + +import ( + "fmt" + + "github.com/docker/libcontainer/devices" +) + +type FreezerState string + +const ( + Undefined FreezerState = "" + Frozen FreezerState = "FROZEN" + Thawed FreezerState = "THAWED" +) + +type NotFoundError struct { + Subsystem string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) +} + +func NewNotFoundError(sub string) error { + return &NotFoundError{ + Subsystem: sub, + } +} + +func IsNotFound(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*NotFoundError) + return ok +} + +type Cgroup struct { + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` // name of parent cgroup or slice + + AllowAllDevices bool `json:"allow_all_devices,omitempty"` // If this is true allow access to any kind of device within the container. 
If false, allow access only to devices explicitly listed in the allowed_devices list. + AllowedDevices []*devices.Device `json:"allowed_devices,omitempty"` + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + CpusetMems string `json:"cpuset_mems,omitempty"` // MEM to use + Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process + Slice string `json:"slice,omitempty"` // Parent slice to use for systemd +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go new file mode 100644 index 00000000000..e8c52938ce9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go @@ -0,0 +1,27 @@ +package cgroups + +import ( + "bytes" + "testing" +) + +const ( + cgroupsContents = `11:hugetlb:/ +10:perf_event:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct,cpu:/ +3:cpuset:/ +2:name=systemd:/user.slice/user-1000.slice/session-16.scope` +) + +func TestParseCgroups(t *testing.T) { + r := bytes.NewBuffer([]byte(cgroupsContents)) + _, err := ParseCgroupFile("blkio", r) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go new file mode 100644 index 00000000000..f05377f25b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -0,0 +1,229 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +var ( + subsystems = map[string]subsystem{ + "devices": &DevicesGroup{}, + "memory": &MemoryGroup{}, + "cpu": &CpuGroup{}, + "cpuset": &CpusetGroup{}, + "cpuacct": &CpuacctGroup{}, + "blkio": &BlkioGroup{}, + "perf_event": &PerfEventGroup{}, + "freezer": &FreezerGroup{}, + } + CgroupProcesses = "cgroup.procs" +) + +// The absolute path to the root of the cgroup hierarchies. +var cgroupRoot string + +// TODO(vmarmol): Report error here, we'll probably need to wait for the new API. +func init() { + // we can pick any subsystem to find the root + cpuRoot, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return + } + cgroupRoot = filepath.Dir(cpuRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return + } +} + +type subsystem interface { + // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. + GetStats(path string, stats *cgroups.Stats) error + // Removes the cgroup represented by 'data'. + Remove(*data) error + // Creates and joins the cgroup represented by data. 
+ Set(*data) error +} + +type data struct { + root string + cgroup string + c *cgroups.Cgroup + pid int +} + +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { + d, err := getCgroupData(c, pid) + if err != nil { + return nil, err + } + + paths := make(map[string]string) + defer func() { + if err != nil { + cgroups.RemovePaths(paths) + } + }() + for name, sys := range subsystems { + if err := sys.Set(d); err != nil { + return nil, err + } + // FIXME: Apply should, ideally, be reentrant or be broken up into a separate + // create and join phase so that the cgroup hierarchy for a container can be + // created then join consists of writing the process pids to cgroup.procs + p, err := d.path(name) + if err != nil { + if cgroups.IsNotFound(err) { + continue + } + return nil, err + } + paths[name] = p + } + return paths, nil +} + +// Symmetrical public function to update device based cgroups. Also available +// in the systemd implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + d, err := getCgroupData(c, pid) + if err != nil { + return err + } + + devices := subsystems["devices"] + + return devices.Set(d) +} + +func GetStats(systemPaths map[string]string) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + for name, path := range systemPaths { + sys, ok := subsystems[name] + if !ok || !cgroups.PathExists(path) { + continue + } + if err := sys.GetStats(path, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Freeze toggles the container's freezer cgroup depending on the state +// provided +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + d, err := getCgroupData(c, 0) + if err != nil { + return err + } + + c.Freezer = state + + freezer := subsystems["freezer"] + + return freezer.Set(d) +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) { + if cgroupRoot == "" { + return nil, fmt.Errorf("failed to find the cgroup root") + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + return &data{ + root: cgroupRoot, + cgroup: cgroup, + c: c, + pid: pid, + }, nil +} + +func (raw *data) parent(subsystem string) (string, error) { + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) path(subsystem string) (string, error) { + // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. 
+ if filepath.IsAbs(raw.cgroup) { + path := filepath.Join(raw.root, subsystem, raw.cgroup) + + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return "", cgroups.NewNotFoundError(subsystem) + } + + return "", err + } + + return path, nil + } + + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + + return filepath.Join(parent, raw.cgroup), nil +} + +func (raw *data) join(subsystem string) (string, error) { + path, err := raw.path(subsystem) + if err != nil { + return "", err + } + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil { + return "", err + } + return path, nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func readFile(dir, file string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(dir, file)) + return string(data), err +} + +func removePath(p string, err error) error { + if err != nil { + return err + } + if p != "" { + return os.RemoveAll(p) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go new file mode 100644 index 00000000000..ce824d56c27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -0,0 +1,187 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" +) + +type BlkioGroup struct { +} + +func (s *BlkioGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("blkio"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *BlkioGroup) Remove(d *data) error { + return removePath(d.path("blkio")) +} + +/* +examples: + + blkio.sectors + 8:0 6792 + + blkio.io_service_bytes + 8:0 Read 1282048 + 8:0 Write 2195456 + 8:0 Sync 2195456 + 8:0 Async 1282048 + 8:0 Total 3477504 + Total 3477504 + + blkio.io_serviced + 8:0 Read 124 + 8:0 Write 104 + 8:0 Sync 104 + 8:0 Async 124 + 8:0 Total 228 + Total 228 + + blkio.io_queued + 8:0 Read 0 + 8:0 Write 0 + 8:0 Sync 0 + 8:0 Async 0 + 8:0 Total 0 + Total 0 +*/ + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { + var blkioStats []cgroups.BlkioStatEntry + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return blkioStats, nil + } + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + + v, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + major := v + + v, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + minor := v + + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err = strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return nil, err + } + blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) + } + + return 
blkioStats, nil +} + +func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error { + // Try to read CFQ stats available on all CFQ enabled kernels first + if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil { + return getCFQStats(path, stats) + } + return getStats(path, stats) // Use generic stats as fallback +} + +func getCFQStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { + return err + } + stats.BlkioStats.SectorsRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { + return err + } + stats.BlkioStats.IoQueuedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoWaitTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { + return err + } + stats.BlkioStats.IoMergedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoTimeRecursive = blkioStats + + return nil +} + +func getStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go new file mode 100644 index 00000000000..6cd38cbaba5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go @@ -0,0 +1,414 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + sectorsRecursiveContents = `8:0 1024` + serviceBytesRecursiveContents = `8:0 Read 100 +8:0 Write 200 +8:0 Sync 300 +8:0 Async 500 +8:0 Total 500 +Total 500` + servicedRecursiveContents = `8:0 Read 10 +8:0 Write 40 +8:0 Sync 20 +8:0 Async 30 +8:0 Total 50 +Total 50` + queuedRecursiveContents = `8:0 Read 1 +8:0 Write 4 +8:0 Sync 2 +8:0 Async 3 +8:0 Total 5 +Total 5` + serviceTimeRecursiveContents = `8:0 Read 173959 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 173959 +8:0 Total 17395 +Total 17395` + waitTimeRecursiveContents = `8:0 Read 15571 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 15571 +8:0 Total 15571` + mergedRecursiveContents = `8:0 Read 5 +8:0 Write 10 +8:0 Sync 0 +8:0 Async 0 +8:0 Total 15 +Total 15` + 
timeRecursiveContents = `8:0 8` + throttleServiceBytes = `8:0 Read 11030528 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 11030528 +8:0 Total 11030528 +252:0 Read 11030528 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 11030528 +252:0 Total 11030528 +Total 22061056` + throttleServiced = `8:0 Read 164 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 164 +8:0 Total 164 +252:0 Read 164 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 164 +252:0 Total 164 +Total 328` +) + +func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { + *blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op}) +} + +func TestBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. + expectedStats := cgroups.BlkioStats{} + appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "") + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total") + + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total") + + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async") + 
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total") + + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total") + + appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} + +func TestBlkioStatsNoSectorsFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceBytesFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServicedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoQueuedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := 
*cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoWaitTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoMergedFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func 
TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read 100 100", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedFieldType(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read Write", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestNonCFQBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "", + "blkio.io_serviced_recursive": "", + "blkio.io_queued_recursive": "", + "blkio.sectors_recursive": "", + "blkio.io_service_time_recursive": "", + "blkio.io_wait_time_recursive": "", + "blkio.io_merged_recursive": "", + "blkio.time_recursive": "", + "blkio.throttle.io_service_bytes": throttleServiceBytes, + "blkio.throttle.io_serviced": throttleServiced, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. 
+ expectedStats := cgroups.BlkioStats{} + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go new file mode 100644 index 00000000000..efac9ed16ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go @@ -0,0 +1,72 @@ +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpuGroup struct { +} + +func (s *CpuGroup) Set(d *data) error { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + dir, err := d.join("cpu") + if err != nil { + return err + } + if d.c.CpuShares != 0 { + if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil { + return err + } + } + if d.c.CpuPeriod != 0 { + if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil { + return err + } + } + if d.c.CpuQuota != 0 { + if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil { + return err + } + } + return nil +} + +func (s *CpuGroup) Remove(d *data) error { + return removePath(d.path("cpu")) +} + +func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "nr_periods": + stats.CpuStats.ThrottlingData.Periods = v + + case "nr_throttled": + stats.CpuStats.ThrottlingData.ThrottledPeriods = v + + case "throttled_time": + stats.CpuStats.ThrottlingData.ThrottledTime 
= v + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go new file mode 100644 index 00000000000..2470e689562 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go @@ -0,0 +1,69 @@ +package fs + +import ( + "fmt" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + const ( + kNrPeriods = 2000 + kNrThrottled = 200 + kThrottledTime = uint64(18446744073709551615) + ) + + cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n", + kNrPeriods, kNrThrottled, kThrottledTime) + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + expectedStats := cgroups.ThrottlingData{ + Periods: kNrPeriods, + ThrottledPeriods: kNrThrottled, + ThrottledTime: kThrottledTime} + + expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal("Expected not to fail, but did") + } +} + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go new file mode 100644 index 00000000000..14b55ccd4e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go @@ -0,0 +1,110 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/system" +) + +const ( + cgroupCpuacctStat = "cpuacct.stat" + nanosecondsInSecond = 1000000000 +) + +var clockTicks = uint64(system.GetClockTicks()) + +type CpuacctGroup struct { +} + +func (s *CpuacctGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *CpuacctGroup) Remove(d *data) error { + return removePath(d.path("cpuacct")) +} + +func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { + userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path) + if err != nil { + return err + } + + totalUsage, err := getCgroupParamUint(path, "cpuacct.usage") + if err != nil { + return err + } + + percpuUsage, err := getPercpuUsage(path) + if err != nil { + return err + } + + stats.CpuStats.CpuUsage.TotalUsage = totalUsage + stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage + stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage + 
stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage + return nil +} + +// Returns user and kernel usage breakdown in nanoseconds. +func getCpuUsageBreakdown(path string) (uint64, uint64, error) { + userModeUsage := uint64(0) + kernelModeUsage := uint64(0) + const ( + userField = "user" + systemField = "system" + ) + + // Expected format: + // user + // system + data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat)) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat)) + } + if fields[0] != userField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) + } + if fields[2] != systemField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField) + } + if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { + return 0, 0, err + } + if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { + return 0, 0, err + } + + return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil +} + +func getPercpuUsage(path string) ([]uint64, error) { + percpuUsage := []uint64{} + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) + if err != nil { + return percpuUsage, err + } + for _, value := range strings.Fields(string(data)) { + value, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + percpuUsage = append(percpuUsage, value) + } + return percpuUsage, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go new file mode 100644 index 00000000000..ff67a53e87d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go @@ -0,0 +1,122 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpusetGroup struct { +} + +func (s *CpusetGroup) Set(d *data) error { + dir, err := d.path("cpuset") + if err != nil { + return err + } + return s.SetDir(dir, d.c.CpusetCpus, d.c.CpusetMems, d.pid) +} + +func (s *CpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} + +func (s *CpusetGroup) SetDir(dir, cpus string, mems string, pid int) error { + if err := s.ensureParent(dir); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil { + return err + } + + // If we don't use --cpuset-xxx, the default value inherit from parent cgroup + // is set in s.ensureParent, otherwise, use the value we set + if cpus != "" { + if err := writeFile(dir, "cpuset.cpus", cpus); err != nil { + return err + } + } + if mems != "" { + if err := writeFile(dir, "cpuset.mems", mems); err != nil { + return err + } + } + + return nil +} + +func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, 
err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent ensures that the parent directory of current is created +// with the proper cpus and mems files copied from it's parent if the values +// are a file with a new line char +func (s *CpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are 0 +func (s *CpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go new file mode 100644 index 00000000000..98d5d2d7dda --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go @@ -0,0 +1,34 @@ +package fs + +import "github.com/docker/libcontainer/cgroups" + +type DevicesGroup struct { +} + +func (s *DevicesGroup) Set(d *data) error { + dir, err := d.join("devices") + if err != nil { + return err + } + + if !d.c.AllowAllDevices { + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range d.c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + } + return nil +} + +func (s *DevicesGroup) Remove(d *data) error { + return removePath(d.path("devices")) +} + +func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go new file mode 100644 index 00000000000..c6b677fa951 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go @@ -0,0 +1,50 @@ +package fs + +import ( + "strings" + "time" + + "github.com/docker/libcontainer/cgroups" +) + +type FreezerGroup struct { +} + +func (s *FreezerGroup) Set(d *data) error { + switch d.c.Freezer { + case cgroups.Frozen, cgroups.Thawed: + dir, err := d.path("freezer") + if err != nil { + return err + } + + if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil { + return err + } + + for { + state, err := readFile(dir, "freezer.state") + if err != nil { + return err + } + if strings.TrimSpace(state) == string(d.c.Freezer) { + break + } + time.Sleep(1 * time.Millisecond) + } + default: + if _, err := d.join("freezer"); err != 
nil && !cgroups.IsNotFound(err) { + return err + } + } + + return nil +} + +func (s *FreezerGroup) Remove(d *data) error { + return removePath(d.path("freezer")) +} + +func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go new file mode 100644 index 00000000000..01713fd7906 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go @@ -0,0 +1,98 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type MemoryGroup struct { +} + +func (s *MemoryGroup) Set(d *data) error { + dir, err := d.join("memory") + // only return an error for memory if it was specified + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + // Only set values if some config was specified. + if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 { + if d.c.Memory != 0 { + if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { + return err + } + } + if d.c.MemoryReservation != 0 { + if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil { + return err + } + } + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to '-1'. + if d.c.MemorySwap == 0 { + if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.Memory*2, 10)); err != nil { + return err + } + } + if d.c.MemorySwap > 0 { + if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.MemorySwap, 10)); err != nil { + return err + } + } + } + return nil +} + +func (s *MemoryGroup) Remove(d *data) error { + return removePath(d.path("memory")) +} + +func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { + // Set stats from memory.stat. + statsFile, err := os.Open(filepath.Join(path, "memory.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) + } + stats.MemoryStats.Stats[t] = v + } + + // Set memory usage and max historical usage. 
+ value, err := getCgroupParamUint(path, "memory.usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err) + } + stats.MemoryStats.Usage = value + value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err) + } + stats.MemoryStats.MaxUsage = value + value, err = getCgroupParamUint(path, "memory.failcnt") + if err != nil { + return fmt.Errorf("failed to parse memory.failcnt - %v", err) + } + stats.MemoryStats.Failcnt = value + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go new file mode 100644 index 00000000000..a21cec75c01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go @@ -0,0 +1,134 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + memoryStatContents = `cache 512 +rss 1024` + memoryUsageContents = "2048\n" + memoryMaxUsageContents = "4096\n" + memoryFailcnt = "100\n" +) + +func TestMemoryStats(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + "memory.failcnt": memoryFailcnt, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}} + expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats) +} + +func TestMemoryStatsNoStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } +} + +func TestMemoryStatsNoUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": "rss rss", + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := 
*cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": "bad", + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": "bad", + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go new file mode 100644 index 00000000000..813274d8cbf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go @@ -0,0 +1,24 @@ +package fs + +import ( + "github.com/docker/libcontainer/cgroups" +) + +type PerfEventGroup struct { +} + +func (s *PerfEventGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *PerfEventGroup) Remove(d *data) error { + return removePath(d.path("perf_event")) +} + +func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go new file mode 100644 index 00000000000..c55ba938cbc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go @@ -0,0 +1,97 @@ +package fs + +import ( + "fmt" + "log" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error { + if len(expected) != len(actual) { + return fmt.Errorf("blkioStatEntries length do not match") + } + for i, expValue := range expected { + actValue := actual[i] + if expValue != actValue { + return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue) + } + } + return nil +} + +func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { + if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil { + log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil { + log.Printf("blkio IoServicedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil { + log.Printf("blkio IoQueuedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil { + 
log.Printf("blkio SectorsRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil { + log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil { + log.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil { + log.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil { + log.Printf("blkio IoTimeRecursive do not match - %s\n", err) + t.Fail() + } +} + +func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { + if expected != actual { + log.Printf("Expected throttling data %v but found %v\n", expected, actual) + t.Fail() + } +} + +func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) { + if expected.Usage != actual.Usage { + log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage) + t.Fail() + } + if expected.MaxUsage != actual.MaxUsage { + log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage) + t.Fail() + } + for key, expValue := range expected.Stats { + actValue, ok := actual.Stats[key] + if !ok { + log.Printf("Expected memory stat key %s not found\n", key) + t.Fail() + } + if expValue != actValue { + log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue) + t.Fail() + } + } + if expected.Failcnt != actual.Failcnt { + log.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt) + t.Fail() + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go new file mode 100644 index 00000000000..548870a8a30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go @@ -0,0 +1,60 @@ +/* +Utility for testing cgroup operations. + +Creates a mock of the cgroup filesystem for the duration of the test. +*/ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +type cgroupTestUtil struct { + // data to use in tests. + CgroupData *data + + // Path to the mock cgroup directory. + CgroupPath string + + // Temporary directory to store mock cgroup filesystem. + tempDir string + t *testing.T +} + +// Creates a new test util for the specified subsystem +func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil { + d := &data{} + tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem)) + if err != nil { + t.Fatal(err) + } + d.root = tempDir + testCgroupPath, err := d.path(subsystem) + if err != nil { + t.Fatal(err) + } + + // Ensure the full mock cgroup path exists. + err = os.MkdirAll(testCgroupPath, 0755) + if err != nil { + t.Fatal(err) + } + return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t} +} + +func (c *cgroupTestUtil) cleanup() { + os.RemoveAll(c.tempDir) +} + +// Write the specified contents on the mock of the specified cgroup files. 
+func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { + for file, contents := range fileContents { + err := writeFile(c.CgroupPath, file, contents) + if err != nil { + c.t.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go new file mode 100644 index 00000000000..f37a3a485a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go @@ -0,0 +1,62 @@ +package fs + +import ( + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +var ( + ErrNotSupportStat = errors.New("stats are not supported for subsystem") + ErrNotValidFormat = errors.New("line is not a valid key value format") +) + +// Saturates negative values at zero and returns a uint64. +// Due to kernel bugs, some of the memory cgroup stats can be negative. +func parseUint(s string, base, bitSize int) (uint64, error) { + value, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + return 0, nil + } + + return value, err + } + + return value, nil +} + +// Parses a cgroup param and returns as name, value +// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 +func getCgroupParamKeyValue(t string) (string, uint64, error) { + parts := strings.Fields(t) + switch len(parts) { + case 2: + value, err := parseUint(parts[1], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("Unable to convert param value (%q) to uint64: %v", parts[1], err) + } + + return parts[0], value, nil + default: + return "", 0, ErrNotValidFormat + } +} + +// Gets a single uint64 value from the specified cgroup file. +func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return 0, err + } + + return parseUint(strings.TrimSpace(string(contents)), 10, 64) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go new file mode 100644 index 00000000000..8b19a84b279 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go @@ -0,0 +1,95 @@ +package fs + +import ( + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "testing" +) + +const ( + cgroupFile = "cgroup.file" + floatValue = 2048.0 + floatString = "2048" +) + +func TestGetCgroupParamsInt(t *testing.T) { + // Setup tempdir. + tempDir, err := ioutil.TempDir("", "cgroup_utils_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, cgroupFile) + + // Success. + err = ioutil.WriteFile(tempFile, []byte(floatString), 0755) + if err != nil { + t.Fatal(err) + } + value, err := getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with new line. 
+ err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with negative values + err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Success with negative values lesser than min int64 + s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64) + err = ioutil.WriteFile(tempFile, []byte(s), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Not a float. + err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } + + // Unknown file. + err = os.Remove(tempFile) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go new file mode 100644 index 00000000000..dc5dbb3c21b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go @@ -0,0 +1,73 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +// All CPU stats are aggregate since container inception. +type CpuUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage,omitempty"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. 
+ Failcnt uint64 `json:"failcnt"` +} + +type BlkioStatEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type BlkioStats struct { + // number of bytes tranferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` +} + +func NewStats() *Stats { + memoryStats := MemoryStats{Stats: make(map[string]uint64)} + return &Stats{MemoryStats: memoryStats} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go new file mode 100644 index 00000000000..4b9a2f5b74b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -0,0 +1,29 @@ +// +build !linux + +package systemd + +import ( + "fmt" + + "github.com/docker/libcontainer/cgroups" +) + +func UseSystemd() bool { + return false +} + +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return fmt.Errorf("Systemd not supported") +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + return fmt.Errorf("Systemd not supported") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go new file mode 100644 index 00000000000..41dce3117d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -0,0 +1,317 @@ +// +build linux + +package systemd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/dbus" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/godbus/dbus" +) + +type systemdCgroup struct { + cgroup *cgroups.Cgroup +} + +type subsystem interface { + GetStats(string, *cgroups.Stats) error +} + +var ( + connLock sync.Mutex + theConn *systemd.Conn + hasStartTransientUnit bool +) + +func newProp(name string, units interface{}) systemd.Property { + return systemd.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +func UseSystemd() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil || !s.IsDir() { + return false + } + + connLock.Lock() + defer connLock.Unlock() + + if theConn == nil { + var err error + theConn, err = systemd.New() + if err != nil { + 
return false + } + + // Assume we have StartTransientUnit + hasStartTransientUnit = true + + // But if we get UnknownMethod error we don't + if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil { + if dbusError, ok := err.(dbus.Error); ok { + if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { + hasStartTransientUnit = false + } + } + } + } + return hasStartTransientUnit +} + +func getIfaceForUnit(unitName string) string { + if strings.HasSuffix(unitName, ".scope") { + return "Scope" + } + if strings.HasSuffix(unitName, ".service") { + return "Service" + } + return "Unit" +} + +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { + var ( + unitName = getUnitName(c) + slice = "system.slice" + properties []systemd.Property + res = &systemdCgroup{} + ) + + res.cgroup = c + + if c.Slice != "" { + slice = c.Slice + } + + properties = append(properties, + systemd.PropSlice(slice), + systemd.PropDescription("docker container "+c.Name), + newProp("PIDs", []uint32{uint32(pid)}), + ) + + // Always enable accounting, this gets us the same behaviour as the fs implementation, + // plus the kernel has some problems with joining the memory cgroup at a later time. + properties = append(properties, + newProp("MemoryAccounting", true), + newProp("CPUAccounting", true), + newProp("BlockIOAccounting", true)) + + if c.Memory != 0 { + properties = append(properties, + newProp("MemoryLimit", uint64(c.Memory))) + } + // TODO: MemoryReservation and MemorySwap not available in systemd + + if c.CpuShares != 0 { + properties = append(properties, + newProp("CPUShares", uint64(c.CpuShares))) + } + + if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { + return nil, err + } + + if !c.AllowAllDevices { + if err := joinDevices(c, pid); err != nil { + return nil, err + } + } + + // -1 disables memorySwap + if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) { + if err := joinMemory(c, pid); err != nil { + return nil, err + } + + } + + // we need to manually join the freezer and cpuset cgroup in systemd + // because it does not currently support it via the dbus api. 
+ if err := joinFreezer(c, pid); err != nil { + return nil, err + } + + if err := joinCpuset(c, pid); err != nil { + return nil, err + } + + paths := make(map[string]string) + for _, sysname := range []string{ + "devices", + "memory", + "cpu", + "cpuset", + "cpuacct", + "blkio", + "perf_event", + "freezer", + } { + subsystemPath, err := getSubsystemPath(res.cgroup, sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + return nil, err + } + paths[sysname] = subsystemPath + } + return paths, nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func joinFreezer(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + + return ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700) +} + +func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) { + mountpoint, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return "", err + } + + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + + slice := "system.slice" + if c.Slice != "" { + slice = c.Slice + } + + return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil { + return err + } + for { + state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state")) + if err != nil { + return err + } + if string(state) == string(bytes.TrimSpace(state_)) { + break + } + time.Sleep(1 * time.Millisecond) + } + return nil +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + path, err := getSubsystemPath(c, "cpu") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(path) +} + +func getUnitName(c *cgroups.Cgroup) string { + return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) +} + +// Atm we can't use the systemd device support because of two missing things: +// * Support for wildcards to allow mknod on any device +// * Support for wildcards to allow /dev/pts support +// +// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is +// in wide use. When both these are availalable we will be able to switch, but need to keep the old +// implementation for backwards compat. +// +// Note: we can't use systemd to set up the initial limits, and then change the cgroup +// because systemd will re-write the device settings if it needs to re-apply the cgroup context. +// This happens at least for v208 when any sibling unit is started. 
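+// To make the manual setup concrete: when AllowAllDevices is false, joinDevices
+// below first writes "a" to devices.deny (deny everything) and then writes one
+// devices.allow entry per device in c.AllowedDevices, formatted by
+// Device.GetCgroupAllowString, e.g. "c 1:3 rwm" for /dev/null or "c *:* m" for
+// the mknod wildcard entry in DefaultAllowedDevices.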
+func joinDevices(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "devices") + if err != nil { + return err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return err + } + + if err := writeFile(path, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range c.AllowedDevices { + if err := writeFile(path, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + + return nil +} + +// Symmetrical public function to update device based cgroups. Also available +// in the fs implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return joinDevices(c, pid) +} + +func joinMemory(c *cgroups.Cgroup, pid int) error { + memorySwap := c.MemorySwap + + if memorySwap == 0 { + // By default, MemorySwap is set to twice the size of RAM. + memorySwap = c.Memory * 2 + } + + path, err := getSubsystemPath(c, "memory") + if err != nil { + return err + } + + return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700) +} + +// systemd does not atm set up the cpuset controller, so we must manually +// join it. Additionally that is a very finicky controller where each +// level must have a full setup as the default for a new directory is "no cpus" +func joinCpuset(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "cpuset") + if err != nil { + return err + } + + s := &fs.CpusetGroup{} + + return s.SetDir(path, c.CpusetCpus, c.CpusetMems, pid) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go new file mode 100644 index 00000000000..a360904cce6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go @@ -0,0 +1,223 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/mount" +) + +// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt +func FindCgroupMountpoint(subsystem string) (string, error) { + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if opt == subsystem { + return mount.Mountpoint, nil + } + } + } + } + + return "", NewNotFoundError(subsystem) +} + +type Mount struct { + Mountpoint string + Subsystems []string +} + +func (m Mount) GetThisCgroupDir() (string, error) { + if len(m.Subsystems) == 0 { + return "", fmt.Errorf("no subsystem for mount") + } + + return GetThisCgroupDir(m.Subsystems[0]) +} + +func GetCgroupMounts() ([]Mount, error) { + mounts, err := mount.GetMounts() + if err != nil { + return nil, err + } + + all, err := GetAllSubsystems() + if err != nil { + return nil, err + } + + allMap := make(map[string]bool) + for _, s := range all { + allMap[s] = true + } + + res := []Mount{} + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + m := Mount{Mountpoint: mount.Mountpoint} + + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if strings.HasPrefix(opt, "name=") { + m.Subsystems = append(m.Subsystems, opt) + } + if allMap[opt] { + m.Subsystems = append(m.Subsystems, opt) + } + } + res = append(res, m) + } + } + return res, nil +} + +// Returns all 
the cgroup subsystems supported by the kernel +func GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + return subsystems, nil +} + +// Returns the relative path to the cgroup docker is running in. +func GetThisCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return ParseCgroupFile(subsystem, f) +} + +func GetInitCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/1/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return ParseCgroupFile(subsystem, f) +} + +func ReadProcsFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, "cgroup.procs")) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +func ParseCgroupFile(subsystem string, r io.Reader) (string, error) { + s := bufio.NewScanner(r) + + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + + text := s.Text() + parts := strings.Split(text, ":") + + for _, subs := range strings.Split(parts[1], ",") { + if subs == subsystem { + return parts[2], nil + } + } + } + + return "", NewNotFoundError(subsystem) +} + +func PathExists(path string) bool { + if _, err := os.Stat(path); err != nil { + return false + } + return true +} + +func EnterPid(cgroupPaths map[string]string, pid int) error { + for _, path := range cgroupPaths { + if PathExists(path) { + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), + []byte(strconv.Itoa(pid)), 0700); err != nil { + return err + } + } + } + return nil +} + +// RemovePaths iterates over the provided paths removing them. +// We trying to remove all paths five times with increasing delay between tries. +// If after all there are not removed cgroups - appropriate error will be +// returned. 
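+// Concretely: up to five removal passes are made, sleeping 10ms, 20ms, 40ms and
+// 80ms before the second through fifth passes. A path is only dropped from the
+// map once os.Stat reports it as missing, because RemoveAll tends to return an
+// error for cgroup directories even when the removal actually succeeded.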
+func RemovePaths(paths map[string]string) (err error) { + delay := 10 * time.Millisecond + for i := 0; i < 5; i++ { + if i != 0 { + time.Sleep(delay) + delay *= 2 + } + for s, p := range paths { + os.RemoveAll(p) + // TODO: here probably should be logging + _, err := os.Stat(p) + // We need this strange way of checking cgroups existence because + // RemoveAll almost always returns error, even on already removed + // cgroups + if os.IsNotExist(err) { + delete(paths, s) + } + } + if len(paths) == 0 { + return nil + } + } + return fmt.Errorf("Failed to remove paths: %s", paths) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/config.go b/Godeps/_workspace/src/github.com/docker/libcontainer/config.go new file mode 100644 index 00000000000..7ab9a9a76a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/config.go @@ -0,0 +1,150 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/network" +) + +type MountConfig mount.MountConfig + +type Network network.Network + +type NamespaceType string + +const ( + NEWNET NamespaceType = "NEWNET" + NEWPID NamespaceType = "NEWPID" + NEWNS NamespaceType = "NEWNS" + NEWUTS NamespaceType = "NEWUTS" + NEWIPC NamespaceType = "NEWIPC" + NEWUSER NamespaceType = "NEWUSER" +) + +// Namespace defines configuration for each namespace. It specifies an +// alternate path that is able to be joined via setns. +type Namespace struct { + Type NamespaceType `json:"type"` + Path string `json:"path,omitempty"` +} + +type Namespaces []Namespace + +func (n *Namespaces) Remove(t NamespaceType) bool { + i := n.index(t) + if i == -1 { + return false + } + *n = append((*n)[:i], (*n)[i+1:]...) + return true +} + +func (n *Namespaces) Add(t NamespaceType, path string) { + i := n.index(t) + if i == -1 { + *n = append(*n, Namespace{Type: t, Path: path}) + return + } + (*n)[i].Path = path +} + +func (n *Namespaces) index(t NamespaceType) int { + for i, ns := range *n { + if ns.Type == t { + return i + } + } + return -1 +} + +func (n *Namespaces) Contains(t NamespaceType) bool { + return n.index(t) != -1 +} + +// Config defines configuration options for executing a process inside a contained environment. +type Config struct { + // Mount specific options. 
+ MountConfig *MountConfig `json:"mount_config,omitempty"` + + // Pathname to container's root filesystem + RootFs string `json:"root_fs,omitempty"` + + // Hostname optionally sets the container's hostname if provided + Hostname string `json:"hostname,omitempty"` + + // User will set the uid and gid of the executing process running inside the container + User string `json:"user,omitempty"` + + // WorkingDir will change the processes current working directory inside the container's rootfs + WorkingDir string `json:"working_dir,omitempty"` + + // Env will populate the processes environment with the provided values + // Any values from the parent processes will be cleared before the values + // provided in Env are provided to the process + Env []string `json:"environment,omitempty"` + + // Tty when true will allocate a pty slave on the host for access by the container's process + // and ensure that it is mounted inside the container's rootfs + Tty bool `json:"tty,omitempty"` + + // Namespaces specifies the container's namespaces that it should setup when cloning the init process + // If a namespace is not provided that namespace is shared from the container's parent process + Namespaces Namespaces `json:"namespaces,omitempty"` + + // Capabilities specify the capabilities to keep when executing the process inside the container + // All capbilities not specified will be dropped from the processes capability mask + Capabilities []string `json:"capabilities,omitempty"` + + // Networks specifies the container's network setup to be created + Networks []*Network `json:"networks,omitempty"` + + // Routes can be specified to create entries in the route table as the container is started + Routes []*Route `json:"routes,omitempty"` + + // Cgroups specifies specific cgroup settings for the various subsystems that the container is + // placed into to limit the resources the container has available + Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` + + // AppArmorProfile specifies the profile to apply to the process running in the container and is + // change at the time the process is execed + AppArmorProfile string `json:"apparmor_profile,omitempty"` + + // ProcessLabel specifies the label to apply to the process running in the container. It is + // commonly used by selinux + ProcessLabel string `json:"process_label,omitempty"` + + // RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and + // /proc/bus + RestrictSys bool `json:"restrict_sys,omitempty"` + + // Rlimits specifies the resource limits, such as max open files, to set in the container + // If Rlimits are not set, the container will inherit rlimits from the parent process + Rlimits []Rlimit `json:"rlimits,omitempty"` +} + +// Routes can be specified to create entries in the route table as the container is started +// +// All of destination, source, and gateway should be either IPv4 or IPv6. +// One of the three options must be present, and ommitted entries will use their +// IP family default for the route table. For IPv4 for example, setting the +// gateway to 1.2.3.4 and the interface to eth0 will set up a standard +// destination of 0.0.0.0(or *) when viewed in the route table. +type Route struct { + // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 + Destination string `json:"destination,omitempty"` + + // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 + Source string `json:"source,omitempty"` + + // Sets the gateway. 
Accepts IPv4 and IPv6 + Gateway string `json:"gateway,omitempty"` + + // The device to set this route up for, for example: eth0 + InterfaceName string `json:"interface_name,omitempty"` +} + +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go new file mode 100644 index 00000000000..f2287fc7414 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/config_test.go @@ -0,0 +1,172 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/docker/libcontainer/devices" +) + +// Checks whether the expected capability is specified in the capabilities. +func contains(expected string, values []string) bool { + for _, v := range values { + if v == expected { + return true + } + } + return false +} + +func containsDevice(expected *devices.Device, values []*devices.Device) bool { + for _, d := range values { + if d.Path == expected.Path && + d.CgroupPermissions == expected.CgroupPermissions && + d.FileMode == expected.FileMode && + d.MajorNumber == expected.MajorNumber && + d.MinorNumber == expected.MinorNumber && + d.Type == expected.Type { + return true + } + } + return false +} + +func loadConfig(name string) (*Config, error) { + f, err := os.Open(filepath.Join("sample_configs", name)) + if err != nil { + return nil, err + } + defer f.Close() + + var container *Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func TestConfigJsonFormat(t *testing.T) { + container, err := loadConfig("attach_to_bridge.json") + if err != nil { + t.Fatal(err) + } + + if container.Hostname != "koye" { + t.Log("hostname is not set") + t.Fail() + } + + if !container.Tty { + t.Log("tty should be set to true") + t.Fail() + } + + if !container.Namespaces.Contains(NEWNET) { + t.Log("namespaces should contain NEWNET") + t.Fail() + } + + if container.Namespaces.Contains(NEWUSER) { + t.Log("namespaces should not contain NEWUSER") + t.Fail() + } + + if contains("SYS_ADMIN", container.Capabilities) { + t.Log("SYS_ADMIN should not be enabled in capabilities mask") + t.Fail() + } + + if !contains("MKNOD", container.Capabilities) { + t.Log("MKNOD should be enabled in capabilities mask") + t.Fail() + } + + if !contains("SYS_CHROOT", container.Capabilities) { + t.Log("capabilities mask should contain SYS_CHROOT") + t.Fail() + } + + for _, n := range container.Networks { + if n.Type == "veth" { + if n.Bridge != "docker0" { + t.Logf("veth bridge should be docker0 but received %q", n.Bridge) + t.Fail() + } + + if n.Address != "172.17.0.101/16" { + t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address) + t.Fail() + } + + if n.VethPrefix != "veth" { + t.Logf("veth prefix should be veth but received %q", n.VethPrefix) + t.Fail() + } + + if n.Gateway != "172.17.42.1" { + t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway) + t.Fail() + } + + if n.Mtu != 1500 { + t.Logf("veth mtu should be 1500 but received %d", n.Mtu) + t.Fail() + } + + break + } + } + + for _, d := range devices.DefaultSimpleDevices { + if !containsDevice(d, container.MountConfig.DeviceNodes) { + t.Logf("expected device configuration for %s", d.Path) + t.Fail() + } + } + + if !container.RestrictSys { + t.Log("expected restrict sys to be true") + t.Fail() + } +} + 
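As a side note on the Config and Route documentation in config.go above, the following sketch (illustrative only, not part of the vendored sources, with hypothetical values) shows the case the Route comment describes: leaving Destination empty while setting Gateway and InterfaceName gives the route the IPv4 family default destination of 0.0.0.0 (shown as * in the route table).

func exampleRouteConfig() *Config {
	// Illustrative sketch only; every value here is made up.
	return &Config{
		RootFs:   "/var/lib/containers/demo", // hypothetical rootfs path
		Hostname: "demo",
		Routes: []*Route{
			{
				// Destination is omitted, so the IPv4 default
				// (0.0.0.0, i.e. "*") applies to this route.
				Gateway:       "172.17.42.1",
				InterfaceName: "eth0",
			},
		},
	}
}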
+func TestApparmorProfile(t *testing.T) { + container, err := loadConfig("apparmor.json") + if err != nil { + t.Fatal(err) + } + + if container.AppArmorProfile != "docker-default" { + t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile) + } +} + +func TestSelinuxLabels(t *testing.T) { + container, err := loadConfig("selinux.json") + if err != nil { + t.Fatal(err) + } + label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475" + + if container.ProcessLabel != label { + t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel) + } + if container.MountConfig.MountLabel != label { + t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel) + } +} + +func TestRemoveNamespace(t *testing.T) { + ns := Namespaces{ + {Type: NEWNET}, + } + if !ns.Remove(NEWNET) { + t.Fatal("NEWNET was not removed") + } + if len(ns) != 0 { + t.Fatalf("namespaces should have 0 items but reports %d", len(ns)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go b/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go new file mode 100644 index 00000000000..438e670420b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/console/console.go @@ -0,0 +1,128 @@ +// +build linux + +package console + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/docker/libcontainer/label" +) + +// Setup initializes the proper /dev/console inside the rootfs path +func Setup(rootfs, consolePath, mountLabel string) error { + oldMask := syscall.Umask(0000) + defer syscall.Umask(oldMask) + + if err := os.Chmod(consolePath, 0600); err != nil { + return err + } + + if err := os.Chown(consolePath, 0, 0); err != nil { + return err + } + + if err := label.SetFileLabel(consolePath, mountLabel); err != nil { + return fmt.Errorf("set file label %s %s", consolePath, err) + } + + dest := filepath.Join(rootfs, "dev/console") + + f, err := os.Create(dest) + if err != nil && !os.IsExist(err) { + return fmt.Errorf("create %s %s", dest, err) + } + + if f != nil { + f.Close() + } + + if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) + } + + return nil +} + +func OpenAndDup(consolePath string) error { + slave, err := OpenTerminal(consolePath, syscall.O_RDWR) + if err != nil { + return fmt.Errorf("open terminal %s", err) + } + + if err := syscall.Dup2(int(slave.Fd()), 0); err != nil { + return err + } + + if err := syscall.Dup2(int(slave.Fd()), 1); err != nil { + return err + } + + return syscall.Dup2(int(slave.Fd()), 2) +} + +// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// Unlockpt should be called before opening the slave side of a pseudoterminal. +func Unlockpt(f *os.File) error { + var u int32 + + return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// Ptsname retrieves the name of the first available pts for the given master. 
+func Ptsname(f *os.File) (string, error) { + var n int32 + + if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the +// pts name for use as the pty slave inside the container +func CreateMasterAndConsole() (*os.File, string, error) { + master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + + console, err := Ptsname(master) + if err != nil { + return nil, "", err + } + + if err := Unlockpt(master); err != nil { + return nil, "", err + } + + return master, console, nil +} + +// OpenPtmx opens /dev/ptmx, i.e. the PTY master. +func OpenPtmx() (*os.File, error) { + // O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all. + return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) +} + +// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC +// used to open the pty slave inside the container namespace +func OpenTerminal(name string, flag int) (*os.File, error) { + r, e := syscall.Open(name, flag, 0) + if e != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: e} + } + return os.NewFile(uintptr(r), name), nil +} + +func Ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/container.go b/Godeps/_workspace/src/github.com/docker/libcontainer/container.go new file mode 100644 index 00000000000..307e8cbcbb1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/container.go @@ -0,0 +1,78 @@ +/* +NOTE: The API is in flux and mainly not implemented. Proceed with caution until further notice. +*/ +package libcontainer + +// A libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. +type Container interface { + // Returns the ID of the container + ID() string + + // Returns the current run state of the container. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + RunState() (*RunState, Error) + + // Returns the current config of the container. + Config() *Config + + // Start a process inside the container. Returns the PID of the new process (in the caller process's namespace) and a channel that will return the exit status of the process whenever it dies. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // ConfigInvalid - config is invalid, + // ContainerPaused - Container is paused, + // SystemError - System error. + Start(config *ProcessConfig) (pid int, exitChan chan int, err Error) + + // Destroys the container after killing all running processes. + // + // Any event registrations are removed before the container is destroyed. + // No error is returned if the container is already destroyed. + // + // Errors: + // SystemError - System error. + Destroy() Error + + // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. 
+ // + // Some of the returned PIDs may no longer refer to processes in the Container, unless + // the Container state is PAUSED in which case every PID in the slice is valid. + Processes() ([]int, Error) + + // Returns statistics for the container. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + Stats() (*ContainerStats, Error) + + // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses + // the execution of any user processes. Asynchronously, when the container finished being paused the + // state is changed to PAUSED. + // If the Container state is PAUSED, do nothing. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + Pause() Error + + // If the Container state is PAUSED, resumes the execution of any user processes in the + // Container before setting the Container state to RUNNING. + // If the Container state is RUNNING, do nothing. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + Resume() Error +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go new file mode 100644 index 00000000000..e0ad0b08f86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go @@ -0,0 +1,159 @@ +package devices + +var ( + // These are devices that are to be both allowed and created. + + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + MajorNumber: 1, + MinorNumber: 3, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + MajorNumber: 1, + MinorNumber: 5, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + MajorNumber: 1, + MinorNumber: 7, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + MajorNumber: 5, + MinorNumber: 0, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom,/dev/random + { + Path: "/dev/urandom", + Type: 'c', + MajorNumber: 1, + MinorNumber: 9, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + MajorNumber: 1, + MinorNumber: 8, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + } + + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + { + Type: 'b', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + MajorNumber: 5, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty0", + Type: 'c', + MajorNumber: 4, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty1", + Type: 'c', + MajorNumber: 4, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + MajorNumber: 136, + MinorNumber: Wildcard, + CgroupPermissions: "rwm", + }, + { + Path: "", + Type: 'c', + MajorNumber: 5, + MinorNumber: 2, + CgroupPermissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 200, + CgroupPermissions: "rwm", + }, + + /*// fuse + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + + // rtc + { + Path: "", + Type: 'c', + 
MajorNumber: 254, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + */ + }, DefaultSimpleDevices...) + + DefaultAutoCreatedDevices = append([]*Device{ + { + // /dev/fuse is created but not allowed. + // This is to allow java to work. Because java + // Insists on there being a /dev/fuse + // https://github.com/docker/docker/issues/514 + // https://github.com/docker/docker/issues/2393 + // + Path: "/dev/fuse", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + }, DefaultSimpleDevices...) +) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go new file mode 100644 index 00000000000..8e86d952929 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go @@ -0,0 +1,132 @@ +package devices + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +const ( + Wildcard = -1 +) + +var ( + ErrNotADeviceNode = errors.New("not a device node") +) + +// Testing dependencies +var ( + osLstat = os.Lstat + ioutilReadDir = ioutil.ReadDir +) + +type Device struct { + Type rune `json:"type,omitempty"` + Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards + MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards. + MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards. + CgroupPermissions string `json:"cgroup_permissions,omitempty"` // Typically just "rwm" + FileMode os.FileMode `json:"file_mode,omitempty"` // The permission bits of the file's mode + Uid uint32 `json:"uid,omitempty"` + Gid uint32 `json:"gid,omitempty"` +} + +func GetDeviceNumberString(deviceNumber int64) string { + if deviceNumber == Wildcard { + return "*" + } else { + return fmt.Sprintf("%d", deviceNumber) + } +} + +func (device *Device) GetCgroupAllowString() string { + return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions) +} + +// Given the path to a device and it's cgroup_permissions(which cannot be easilly queried) look up the information about a linux device and return that information as a Device struct. 
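+// For example, on a typical Linux host GetDevice("/dev/null", "rwm") returns a
+// Device with Type 'c', MajorNumber 1, MinorNumber 3, CgroupPermissions "rwm",
+// and a FileMode carrying the node's permission bits plus the character-device
+// type bit; GetCgroupAllowString on that Device yields "c 1:3 rwm".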
+func GetDevice(path, cgroupPermissions string) (*Device, error) { + fileInfo, err := osLstat(path) + if err != nil { + return nil, err + } + + var ( + devType rune + mode = fileInfo.Mode() + fileModePermissionBits = os.FileMode.Perm(mode) + ) + + switch { + case mode&os.ModeDevice == 0: + return nil, ErrNotADeviceNode + case mode&os.ModeCharDevice != 0: + fileModePermissionBits |= syscall.S_IFCHR + devType = 'c' + default: + fileModePermissionBits |= syscall.S_IFBLK + devType = 'b' + } + + stat_t, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return nil, fmt.Errorf("cannot determine the device number for device %s", path) + } + devNumber := int(stat_t.Rdev) + + return &Device{ + Type: devType, + Path: path, + MajorNumber: Major(devNumber), + MinorNumber: Minor(devNumber), + CgroupPermissions: cgroupPermissions, + FileMode: fileModePermissionBits, + Uid: stat_t.Uid, + Gid: stat_t.Gid, + }, nil +} + +func GetHostDeviceNodes() ([]*Device, error) { + return getDeviceNodes("/dev") +} + +func getDeviceNodes(path string) ([]*Device, error) { + files, err := ioutilReadDir(path) + if err != nil { + return nil, err + } + + out := []*Device{} + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + case "pts", "shm", "fd", "mqueue": + continue + default: + sub, err := getDeviceNodes(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) + continue + } + case f.Name() == "console": + continue + } + + device, err := GetDevice(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == ErrNotADeviceNode { + continue + } + return nil, err + } + out = append(out, device) + } + + return out, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go new file mode 100644 index 00000000000..fec40022375 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go @@ -0,0 +1,61 @@ +package devices + +import ( + "errors" + "os" + "testing" +) + +func TestGetDeviceLstatFailure(t *testing.T) { + testError := errors.New("test error") + + // Override os.Lstat to inject error. + osLstat = func(path string) (os.FileInfo, error) { + return nil, testError + } + + _, err := GetDevice("", "") + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} + +func TestGetHostDeviceNodesIoutilReadDirFailure(t *testing.T) { + testError := errors.New("test error") + + // Override ioutil.ReadDir to inject error. + ioutilReadDir = func(dirname string) ([]os.FileInfo, error) { + return nil, testError + } + + _, err := GetHostDeviceNodes() + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} + +func TestGetHostDeviceNodesIoutilReadDirDeepFailure(t *testing.T) { + testError := errors.New("test error") + called := false + + // Override ioutil.ReadDir to inject error after the first call. + ioutilReadDir = func(dirname string) ([]os.FileInfo, error) { + if called { + return nil, testError + } + called = true + + // Provoke a second call. 
+ fi, err := os.Lstat("/tmp") + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + return []os.FileInfo{fi}, nil + } + + _, err := GetHostDeviceNodes() + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go new file mode 100644 index 00000000000..3aae380bb1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go @@ -0,0 +1,26 @@ +package devices + +/* + +This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved. + +You can read what they are here: + + - http://www.makelinux.net/ldd3/chp-3-sect-2 + - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94 + +Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9 + +*/ + +func Major(devNumber int) int64 { + return int64((devNumber >> 8) & 0xfff) +} + +func Minor(devNumber int) int64 { + return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)) +} + +func Mkdev(majorNumber int64, minorNumber int64) int { + return int((majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/error.go b/Godeps/_workspace/src/github.com/docker/libcontainer/error.go new file mode 100644 index 00000000000..5ff56d80baa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/error.go @@ -0,0 +1,37 @@ +package libcontainer + +// API error code type. +type ErrorCode int + +// API error codes. +const ( + // Factory errors + IdInUse ErrorCode = iota + InvalidIdFormat + // TODO: add Load errors + + // Container errors + ContainerDestroyed + ContainerPaused + + // Common errors + ConfigInvalid + SystemError +) + +// API Error type. +type Error interface { + error + + // Returns the stack trace, if any, which identifies the + // point at which the error occurred. + Stack() []byte + + // Returns a verbose string including the error message + // and a representation of the stack trace suitable for + // printing. + Detail() string + + // Returns the error code for this error. + Code() ErrorCode +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go b/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go new file mode 100644 index 00000000000..e37773b2bd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/factory.go @@ -0,0 +1,32 @@ +package libcontainer + +type Factory interface { + + // Creates a new container with the given id and starts the initial process inside it. + // id must be a string containing only letters, digits and underscores and must contain + // between 1 and 1024 characters, inclusive. + // + // The id must not already be in use by an existing container. Containers created using + // a factory with the same path (and file system) must have distinct ids. + // + // Returns the new container with a running process. 
+ // + // Errors: + // IdInUse - id is already in use by a container + // InvalidIdFormat - id has incorrect format + // ConfigInvalid - config is invalid + // SystemError - System error + // + // On error, any partially created container parts are cleaned up (the operation is atomic). + Create(id string, config *Config) (Container, Error) + + // Load takes an ID for an existing container and reconstructs the container + // from the state. + // + // Errors: + // Path does not exist + // Container is stopped + // System error + // TODO: fix description + Load(id string) (Container, Error) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/doc.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/doc.go new file mode 100644 index 00000000000..87545bc99c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/doc.go @@ -0,0 +1,2 @@ +// integration is used for integration testing of libcontainer +package integration diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/exec_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/exec_test.go new file mode 100644 index 00000000000..f0728c5817d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/exec_test.go @@ -0,0 +1,178 @@ +package integration + +import ( + "os" + "strings" + "testing" + + "github.com/docker/libcontainer" +) + +func TestExecPS(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + buffers, exitCode, err := runContainer(config, "", "ps") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + lines := strings.Split(buffers.Stdout.String(), "\n") + if len(lines) < 2 { + t.Fatalf("more than one process running for output %q", buffers.Stdout.String()) + } + expected := `1 root ps` + actual := strings.Trim(lines[1], "\n ") + if actual != expected { + t.Fatalf("expected output %q but received %q", expected, actual) + } +} + +func TestIPCPrivate(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual == l { + t.Fatalf("ipc link should be private to the conatiner but equals host %q %q", actual, l) + } +} + +func TestIPCHost(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces.Remove(libcontainer.NEWIPC) + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. 
code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCJoinPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces.Add(libcontainer.NEWIPC, "/proc/1/ns/ipc") + + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCBadPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + config.Namespaces.Add(libcontainer.NEWIPC, "/proc/1/ns/ipcc") + + _, _, err = runContainer(config, "", "true") + if err == nil { + t.Fatal("container succeded with bad ipc path") + } +} + +func TestRlimit(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + out, _, err := runContainer(config, "", "/bin/sh", "-c", "ulimit -n") + if err != nil { + t.Fatal(err) + } + if limit := strings.TrimSpace(out.Stdout.String()); limit != "1024" { + t.Fatalf("expected rlimit to be 1024, got %s", limit) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/execin_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/execin_test.go new file mode 100644 index 00000000000..86d9c5c2605 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/execin_test.go @@ -0,0 +1,140 @@ +package integration + +import ( + "os" + "os/exec" + "strings" + "sync" + "testing" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +func TestExecIn(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + if err := writeConfig(config); err != nil { + t.Fatalf("failed to write config %s", err) + } + + containerCmd, statePath, containerErr := startLongRunningContainer(config) + defer func() { + // kill the container + if containerCmd.Process != nil { + containerCmd.Process.Kill() + } + if err := <-containerErr; err != nil { + t.Fatal(err) + } + }() + + // start the exec process + state, err := libcontainer.GetState(statePath) + if err != nil { + t.Fatalf("failed to get state %s", err) + } + buffers := newStdBuffers() + execErr := make(chan error, 1) + go func() { + _, err := namespaces.ExecIn(config, state, []string{"ps"}, + os.Args[0], "exec", buffers.Stdin, buffers.Stdout, buffers.Stderr, + "", nil) + execErr <- err + }() + if err := <-execErr; err != nil { + t.Fatalf("exec finished with error %s", err) + } + + out := buffers.Stdout.String() + if !strings.Contains(out, "sleep 10") || !strings.Contains(out, "ps") { + t.Fatalf("unexpected running process, output %q", out) + } +} + +func TestExecInRlimit(t *testing.T) { + if testing.Short() { + return + } + + 
rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + if err := writeConfig(config); err != nil { + t.Fatalf("failed to write config %s", err) + } + + containerCmd, statePath, containerErr := startLongRunningContainer(config) + defer func() { + // kill the container + if containerCmd.Process != nil { + containerCmd.Process.Kill() + } + if err := <-containerErr; err != nil { + t.Fatal(err) + } + }() + + // start the exec process + state, err := libcontainer.GetState(statePath) + if err != nil { + t.Fatalf("failed to get state %s", err) + } + buffers := newStdBuffers() + execErr := make(chan error, 1) + go func() { + _, err := namespaces.ExecIn(config, state, []string{"/bin/sh", "-c", "ulimit -n"}, + os.Args[0], "exec", buffers.Stdin, buffers.Stdout, buffers.Stderr, + "", nil) + execErr <- err + }() + if err := <-execErr; err != nil { + t.Fatalf("exec finished with error %s", err) + } + + out := buffers.Stdout.String() + if limit := strings.TrimSpace(out); limit != "1024" { + t.Fatalf("expected rlimit to be 1024, got %s", limit) + } +} + +// start a long-running container so we have time to inspect execin processes +func startLongRunningContainer(config *libcontainer.Config) (*exec.Cmd, string, chan error) { + containerErr := make(chan error, 1) + containerCmd := &exec.Cmd{} + var statePath string + + createCmd := func(container *libcontainer.Config, console, dataPath, init string, + pipe *os.File, args []string) *exec.Cmd { + containerCmd = namespaces.DefaultCreateCommand(container, console, dataPath, init, pipe, args) + statePath = dataPath + return containerCmd + } + + var containerStart sync.WaitGroup + containerStart.Add(1) + go func() { + buffers := newStdBuffers() + _, err := namespaces.Exec(config, + buffers.Stdin, buffers.Stdout, buffers.Stderr, + "", config.RootFs, []string{"sleep", "10"}, + createCmd, containerStart.Done) + containerErr <- err + }() + containerStart.Wait() + + return containerCmd, statePath, containerErr +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/init_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/init_test.go new file mode 100644 index 00000000000..3106a5fb1e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/init_test.go @@ -0,0 +1,76 @@ +package integration + +import ( + "encoding/json" + "log" + "os" + "runtime" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" + _ "github.com/docker/libcontainer/namespaces/nsenter" +) + +// init runs the libcontainer initialization code because of the busybox style needs +// to work around the go runtime and the issues with forking +func init() { + if len(os.Args) < 2 { + return + } + // handle init + if len(os.Args) >= 2 && os.Args[1] == "init" { + runtime.LockOSThread() + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + if err := namespaces.Init(container, rootfs, "", os.NewFile(3, "pipe"), os.Args[3:]); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } + os.Exit(1) + } + + // handle execin + if len(os.Args) >= 2 && os.Args[0] == "nsenter-exec" { + runtime.LockOSThread() + + // User args are passed after '--' in the command line. 
+ userArgs := findUserArgs() + + config, err := loadConfigFromFd() + if err != nil { + log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err) + } + + if err := namespaces.FinalizeSetns(config, userArgs); err != nil { + log.Fatalf("docker-exec: failed to exec: %s", err) + } + os.Exit(1) + } +} + +func findUserArgs() []string { + for i, a := range os.Args { + if a == "--" { + return os.Args[i+1:] + } + } + return []string{} +} + +// loadConfigFromFd loads a container's config from the sync pipe that is provided by +// fd 3 when running a process +func loadConfigFromFd() (*libcontainer.Config, error) { + var config *libcontainer.Config + if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil { + return nil, err + } + return config, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/template_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/template_test.go new file mode 100644 index 00000000000..98846eb199b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/template_test.go @@ -0,0 +1,73 @@ +package integration + +import ( + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/devices" +) + +// newTemplateConfig returns a base template for running a container +// +// it uses a network strategy of just setting a loopback interface +// and the default setup for devices +func newTemplateConfig(rootfs string) *libcontainer.Config { + return &libcontainer.Config{ + RootFs: rootfs, + Tty: false, + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: libcontainer.Namespaces([]libcontainer.Namespace{ + {Type: libcontainer.NEWNS}, + {Type: libcontainer.NEWUTS}, + {Type: libcontainer.NEWIPC}, + {Type: libcontainer.NEWPID}, + {Type: libcontainer.NEWNET}, + }), + Cgroups: &cgroups.Cgroup{ + Parent: "integration", + AllowAllDevices: false, + AllowedDevices: devices.DefaultAllowedDevices, + }, + + MountConfig: &libcontainer.MountConfig{ + DeviceNodes: devices.DefaultAutoCreatedDevices, + }, + Hostname: "integration", + Env: []string{ + "HOME=/root", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=integration", + "TERM=xterm", + }, + Networks: []*libcontainer.Network{ + { + Type: "loopback", + Address: "127.0.0.1/0", + Gateway: "localhost", + }, + }, + Rlimits: []libcontainer.Rlimit{ + { + Type: syscall.RLIMIT_NOFILE, + Hard: uint64(1024), + Soft: uint64(1024), + }, + }, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/integration/utils_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/utils_test.go new file mode 100644 index 00000000000..6393fb99829 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/integration/utils_test.go @@ -0,0 +1,95 @@ +package integration + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +func newStdBuffers() *stdBuffers { + return &stdBuffers{ + Stdin: bytes.NewBuffer(nil), + Stdout: bytes.NewBuffer(nil), + Stderr: bytes.NewBuffer(nil), + } +} + +type stdBuffers struct { + Stdin *bytes.Buffer + Stdout *bytes.Buffer + Stderr *bytes.Buffer +} + +func 
writeConfig(config *libcontainer.Config) error { + f, err := os.OpenFile(filepath.Join(config.RootFs, "container.json"), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + return err + } + defer f.Close() + return json.NewEncoder(f).Encode(config) +} + +func loadConfig() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(os.Getenv("data_path"), "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + return container, nil +} + +// newRootFs creates a new tmp directory and copies the busybox root filesystem +func newRootFs() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + if err := copyBusybox(dir); err != nil { + return "", nil + } + return dir, nil +} + +func remove(dir string) { + os.RemoveAll(dir) +} + +// copyBusybox copies the rootfs for a busybox container created for the test image +// into the new directory for the specific test +func copyBusybox(dest string) error { + out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -R /busybox/* %s/", dest)).CombinedOutput() + if err != nil { + return fmt.Errorf("copy error %q: %q", err, out) + } + return nil +} + +// runContainer runs the container with the specific config and arguments +// +// buffers are returned containing the STDOUT and STDERR output for the run +// along with the exit code and any go error +func runContainer(config *libcontainer.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) { + if err := writeConfig(config); err != nil { + return nil, -1, err + } + + buffers = newStdBuffers() + exitCode, err = namespaces.Exec(config, buffers.Stdin, buffers.Stdout, buffers.Stderr, + console, config.RootFs, args, namespaces.DefaultCreateCommand, nil) + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go new file mode 100644 index 00000000000..5a540fd5a02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label.go @@ -0,0 +1,61 @@ +// +build !selinux !linux + +package label + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. 
+func InitLabels(options []string) (string, string, error) { + return "", "", nil +} + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, mountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func SetFileCreateLabel(fileLabel string) error { + return nil +} + +func Relabel(path string, fileLabel string, relabel string) error { + return nil +} + +func GetPidLabel(pid int) (string, error) { + return "", nil +} + +func Init() { +} + +func ReserveLabel(label string) error { + return nil +} + +func UnreserveLabel(label string) error { + return nil +} + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go new file mode 100644 index 00000000000..5983031ae00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux.go @@ -0,0 +1,151 @@ +// +build selinux,linux + +package label + +import ( + "fmt" + "strings" + + "github.com/docker/libcontainer/selinux" +) + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. The labels returned will include a random MCS String, that is +// guaranteed to be unique. +func InitLabels(options []string) (string, string, error) { + if !selinux.SelinuxEnabled() { + return "", "", nil + } + processLabel, mountLabel := selinux.GetLxcContexts() + if processLabel != "" { + pcon := selinux.NewContext(processLabel) + mcon := selinux.NewContext(mountLabel) + for _, opt := range options { + if opt == "disable" { + return "", "", nil + } + if i := strings.Index(opt, ":"); i == -1 { + return "", "", fmt.Errorf("Bad SELinux Option") + } + con := strings.SplitN(opt, ":", 2) + pcon[con[0]] = con[1] + if con[0] == "level" || con[0] == "user" { + mcon[con[0]] = con[1] + } + } + processLabel = pcon.Get() + mountLabel = mcon.Get() + } + return processLabel, mountLabel, nil +} + +// DEPRECATED: The GenLabels function is only to be used during the transition to the official API. +func GenLabels(options string) (string, string, error) { + return InitLabels(strings.Fields(options)) +} + +// FormatMountLabel returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. 
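A minimal sketch of how InitLabels and FormatMountLabel are meant to compose (the option string mirrors the test data above; the mount data string is illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/libcontainer/label"
    )

    func main() {
        // On non-SELinux builds, or with SELinux disabled, both labels come back empty.
        processLabel, mountLabel, err := label.InitLabels([]string{"level:s0:c1,c15"})
        if err != nil {
            fmt.Println("init labels:", err)
            return
        }
        // FormatMountLabel appends a context= option only when a mount label was generated.
        fmt.Println(processLabel, label.FormatMountLabel("mode=755", mountLabel))
    }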
+func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("context=%q", mountLabel) + default: + src = fmt.Sprintf("%s,context=%q", src, mountLabel) + } + } + return src +} + +// SetProcessLabel takes a process label and tells the kernel to assign the +// label to the next program executed by the current process. +func SetProcessLabel(processLabel string) error { + if processLabel == "" { + return nil + } + return selinux.Setexeccon(processLabel) +} + +// GetProcessLabel returns the process label that the kernel will assign +// to the next program executed by the current process. If "" is returned +// this indicates that the default labeling will happen for the process. +func GetProcessLabel() (string, error) { + return selinux.Getexeccon() +} + +// SetFileLabel modifies the "path" label to the specified file label +func SetFileLabel(path string, fileLabel string) error { + if selinux.SelinuxEnabled() && fileLabel != "" { + return selinux.Setfilecon(path, fileLabel) + } + return nil +} + +// Tell the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setfscreatecon(fileLabel) + } + return nil +} + +// Change the label of path to the filelabel string. If the relabel string +// is "z", relabel will change the MCS label to s0. This will allow all +// containers to share the content. If the relabel string is a "Z" then +// the MCS label should continue to be used. SELinux will use this field +// to make sure the content can not be shared by other containes. +func Relabel(path string, fileLabel string, relabel string) error { + if fileLabel == "" { + return nil + } + if relabel == "z" { + c := selinux.NewContext(fileLabel) + c["level"] = "s0" + fileLabel = c.Get() + } + return selinux.Chcon(path, fileLabel, true) +} + +// GetPidLabel will return the label of the process running with the specified pid +func GetPidLabel(pid int) (string, error) { + return selinux.Getpidcon(pid) +} + +// Init initialises the labeling system +func Init() { + selinux.SelinuxEnabled() +} + +// ReserveLabel will record the fact that the MCS label has already been used. +// This will prevent InitLabels from using the MCS label in a newly created +// container +func ReserveLabel(label string) error { + selinux.ReserveLabel(label) + return nil +} + +// UnreserveLabel will remove the reservation of the MCS label. 
+// This will allow InitLabels to use the MCS label in a newly created +// containers +func UnreserveLabel(label string) error { + selinux.FreeLxcContexts(label) + return nil +} + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return selinux.DupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return selinux.DisableSecOpt() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux_test.go new file mode 100644 index 00000000000..8629353f247 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/label/label_selinux_test.go @@ -0,0 +1,89 @@ +// +build selinux,linux + +package label + +import ( + "strings" + "testing" + + "github.com/docker/libcontainer/selinux" +) + +func TestInit(t *testing.T) { + if selinux.SelinuxEnabled() { + var testNull []string + plabel, mlabel, err := InitLabels(testNull) + if err != nil { + t.Log("InitLabels Failed") + t.Fatal(err) + } + testDisabled := []string{"disable"} + plabel, mlabel, err = InitLabels(testDisabled) + if err != nil { + t.Log("InitLabels Disabled Failed") + t.Fatal(err) + } + if plabel != "" { + t.Log("InitLabels Disabled Failed") + t.Fatal() + } + testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"} + plabel, mlabel, err = InitLabels(testUser) + if err != nil { + t.Log("InitLabels User Failed") + t.Fatal(err) + } + if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" { + t.Log("InitLabels User Match Failed") + t.Log(plabel, mlabel) + t.Fatal(err) + } + + testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"} + plabel, mlabel, err = InitLabels(testBadData) + if err == nil { + t.Log("InitLabels Bad Failed") + t.Fatal(err) + } + } +} +func TestDuplicateLabel(t *testing.T) { + secopt := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2") + t.Log(secopt) + for _, opt := range secopt { + con := strings.SplitN(opt, ":", 3) + if len(con) != 3 || con[0] != "label" { + t.Errorf("Invalid DupSecOpt return value") + continue + } + if con[1] == "user" { + if con[2] != "system_u" { + t.Errorf("DupSecOpt Failed user incorrect") + } + continue + } + if con[1] == "role" { + if con[2] != "system_r" { + t.Errorf("DupSecOpt Failed role incorrect") + } + continue + } + if con[1] == "type" { + if con[2] != "svirt_lxc_net_t" { + t.Errorf("DupSecOpt Failed type incorrect") + } + continue + } + if con[1] == "level" { + if con[2] != "s0:c1,c2" { + t.Errorf("DupSecOpt Failed level incorrect") + } + continue + } + t.Errorf("DupSecOpt Failed invalid field %q", con[1]) + } + secopt = DisableSecOpt() + if secopt[0] != "label:disable" { + t.Errorf("DisableSecOpt Failed level incorrect") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go new file mode 100644 index 00000000000..a2c3d520268 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/init.go @@ -0,0 +1,209 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/mount/nodes" +) + +// default 
mount point flags +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +type mount struct { + source string + path string + device string + flags int + data string +} + +// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a +// new mount namespace. +func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error { + var ( + err error + flag = syscall.MS_PRIVATE + ) + + if mountConfig.NoPivotRoot { + flag = syscall.MS_SLAVE + } + + if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { + return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err) + } + + if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("mouting %s as bind %s", rootfs, err) + } + + if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil { + return fmt.Errorf("mount system %s", err) + } + + // apply any user specified mounts within the new mount namespace + for _, m := range mountConfig.Mounts { + if err := m.Mount(rootfs, mountConfig.MountLabel); err != nil { + return err + } + } + + if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil { + return fmt.Errorf("create device nodes %s", err) + } + + if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil { + return err + } + + // stdin, stdout and stderr could be pointing to /dev/null from parent namespace. + // Re-open them inside this namespace. + if err := reOpenDevNull(rootfs); err != nil { + return fmt.Errorf("Failed to reopen /dev/null %s", err) + } + + if err := setupDevSymlinks(rootfs); err != nil { + return fmt.Errorf("dev symlinks %s", err) + } + + if err := syscall.Chdir(rootfs); err != nil { + return fmt.Errorf("chdir into %s %s", rootfs, err) + } + + if mountConfig.NoPivotRoot { + err = MsMoveRoot(rootfs) + } else { + err = PivotRoot(rootfs) + } + + if err != nil { + return err + } + + if mountConfig.ReadonlyFs { + if err := SetReadonly(); err != nil { + return fmt.Errorf("set readonly %s", err) + } + } + + syscall.Umask(0022) + + return nil +} + +// mountSystem sets up linux specific system mounts like mqueue, sys, proc, shm, and devpts +// inside the mount namespace +func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error { + for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly) { + if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { + return fmt.Errorf("mkdirall %s %s", m.path, err) + } + if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err) + } + } + return nil +} + +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + } + return nil +} + +func setupDevSymlinks(rootfs string) error { + var links = [][2]string{ + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } + + // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink + // in /dev if it exists 
in /proc. + if _, err := os.Stat("/proc/kcore"); err == nil { + links = append(links, [2]string{"/proc/kcore", "/dev/kcore"}) + } + + for _, link := range links { + var ( + src = link[0] + dst = filepath.Join(rootfs, link[1]) + ) + + if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { + return fmt.Errorf("symlink %s %s %s", src, dst, err) + } + } + + return nil +} + +// TODO: this is crappy right now and should be cleaned up with a better way of handling system and +// standard bind mounts allowing them to be more dynamic +func newSystemMounts(rootfs, mountLabel string, sysReadonly bool) []mount { + systemMounts := []mount{ + {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "mqueue", path: filepath.Join(rootfs, "dev", "mqueue"), device: "mqueue", flags: defaultMountFlags}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, + } + + sysMountFlags := defaultMountFlags + if sysReadonly { + sysMountFlags |= syscall.MS_RDONLY + } + + systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags}) + + return systemMounts +} + +// Is stdin, stdout or stderr were to be pointing to '/dev/null', +// this method will make them point to '/dev/null' from within this namespace. +func reOpenDevNull(rootfs string) error { + var stat, devNullStat syscall.Stat_t + file, err := os.Open(filepath.Join(rootfs, "/dev/null")) + if err != nil { + return fmt.Errorf("Failed to open /dev/null - %s", err) + } + defer file.Close() + if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { + return fmt.Errorf("Failed to stat /dev/null - %s", err) + } + for fd := 0; fd < 3; fd++ { + if err = syscall.Fstat(fd, &stat); err != nil { + return fmt.Errorf("Failed to stat fd %d - %s", fd, err) + } + if stat.Rdev == devNullStat.Rdev { + // Close and re-open the fd. 
+ if err = syscall.Dup2(int(file.Fd()), fd); err != nil { + return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd, err) + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount.go new file mode 100644 index 00000000000..c1b424214f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount.go @@ -0,0 +1,109 @@ +package mount + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/libcontainer/label" +) + +type Mount struct { + Type string `json:"type,omitempty"` + Source string `json:"source,omitempty"` // Source path, in the host namespace + Destination string `json:"destination,omitempty"` // Destination path, in the container + Writable bool `json:"writable,omitempty"` + Relabel string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared + Private bool `json:"private,omitempty"` + Slave bool `json:"slave,omitempty"` +} + +func (m *Mount) Mount(rootfs, mountLabel string) error { + switch m.Type { + case "bind": + return m.bindMount(rootfs, mountLabel) + case "tmpfs": + return m.tmpfsMount(rootfs, mountLabel) + default: + return fmt.Errorf("unsupported mount type %s for %s", m.Type, m.Destination) + } +} + +func (m *Mount) bindMount(rootfs, mountLabel string) error { + var ( + flags = syscall.MS_BIND | syscall.MS_REC + dest = filepath.Join(rootfs, m.Destination) + ) + + if !m.Writable { + flags = flags | syscall.MS_RDONLY + } + + if m.Slave { + flags = flags | syscall.MS_SLAVE + } + + stat, err := os.Stat(m.Source) + if err != nil { + return err + } + + // FIXME: (crosbymichael) This does not belong here and should be done a layer above + dest, err = symlink.FollowSymlinkInScope(dest, rootfs) + if err != nil { + return err + } + + if err := createIfNotExists(dest, stat.IsDir()); err != nil { + return fmt.Errorf("creating new bind mount target %s", err) + } + + if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) + } + + if !m.Writable { + if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil { + return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err) + } + } + + if m.Relabel != "" { + if err := label.Relabel(m.Source, mountLabel, m.Relabel); err != nil { + return fmt.Errorf("relabeling %s to %s %s", m.Source, mountLabel, err) + } + } + + if m.Private { + if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { + return fmt.Errorf("mounting %s private %s", dest, err) + } + } + + return nil +} + +func (m *Mount) tmpfsMount(rootfs, mountLabel string) error { + var ( + err error + l = label.FormatMountLabel("", mountLabel) + dest = filepath.Join(rootfs, m.Destination) + ) + + // FIXME: (crosbymichael) This does not belong here and should be done a layer above + if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + return err + } + + if err := createIfNotExists(dest, true); err != nil { + return fmt.Errorf("creating new tmpfs mount target %s", err) + } + + if err := syscall.Mount("tmpfs", dest, "tmpfs", uintptr(defaultMountFlags), l); err != nil { + return fmt.Errorf("%s mounting %s in tmpfs", err, dest) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount_config.go 
b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount_config.go new file mode 100644 index 00000000000..eef9b8ce4da --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/mount_config.go @@ -0,0 +1,28 @@ +package mount + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +var ErrUnsupported = errors.New("Unsupported method") + +type MountConfig struct { + // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs + // This is a common option when the container is running in ramdisk + NoPivotRoot bool `json:"no_pivot_root,omitempty"` + + // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted + // bind mounts are writtable + ReadonlyFs bool `json:"readonly_fs,omitempty"` + + // Mounts specify additional source and destination paths that will be mounted inside the container's + // rootfs and mount namespace if specified + Mounts []*Mount `json:"mounts,omitempty"` + + // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! + DeviceNodes []*devices.Device `json:"device_nodes,omitempty"` + + MountLabel string `json:"mount_label,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go new file mode 100644 index 00000000000..94afd3a99c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/msmoveroot.go @@ -0,0 +1,20 @@ +// +build linux + +package mount + +import ( + "fmt" + "syscall" +) + +func MsMoveRoot(rootfs string) error { + if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { + return fmt.Errorf("mount move %s into / %s", rootfs, err) + } + + if err := syscall.Chroot("."); err != nil { + return fmt.Errorf("chroot . %s", err) + } + + return syscall.Chdir("/") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go new file mode 100644 index 00000000000..322c0c0ee27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes.go @@ -0,0 +1,57 @@ +// +build linux + +package nodes + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/devices" +) + +// Create the device nodes in the container. +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { + oldMask := syscall.Umask(0000) + defer syscall.Umask(oldMask) + + for _, node := range nodesToCreate { + if err := CreateDeviceNode(rootfs, node); err != nil { + return err + } + } + return nil +} + +// Creates the device node in the rootfs of the container. 
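A sketch of the kind of MountConfig a caller might assemble from the fields described above (the bind-mount paths are illustrative; DefaultAutoCreatedDevices is the device list already used by the integration template in this diff):

    package main

    import (
        "fmt"

        "github.com/docker/libcontainer/devices"
        "github.com/docker/libcontainer/mount"
    )

    func main() {
        cfg := &mount.MountConfig{
            ReadonlyFs:  true,
            DeviceNodes: devices.DefaultAutoCreatedDevices,
            Mounts: []*mount.Mount{{
                Type:        "bind",
                Source:      "/etc/resolv.conf", // host path (illustrative)
                Destination: "/etc/resolv.conf", // path inside the container rootfs
                Writable:    false,
                Private:     true,
            }},
        }
        fmt.Println(len(cfg.Mounts), cfg.ReadonlyFs)
    }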
+func CreateDeviceNode(rootfs string, node *devices.Device) error { + var ( + dest = filepath.Join(rootfs, node.Path) + parent = filepath.Dir(dest) + ) + + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + + fileMode := node.FileMode + switch node.Type { + case 'c': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + + if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) { + return fmt.Errorf("mknod %s %s", node.Path, err) + } + + if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil { + return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go new file mode 100644 index 00000000000..83660715d46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package nodes + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { + return errors.New("Unsupported method") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go new file mode 100644 index 00000000000..a88ed4a84c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/pivotroot.go @@ -0,0 +1,34 @@ +// +build linux + +package mount + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +func PivotRoot(rootfs string) error { + pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") + if err != nil { + return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) + } + + if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { + return fmt.Errorf("pivot_root %s", err) + } + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + + // path to pivot dir now changed, update + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("unmount pivot_root dir %s", err) + } + + return os.Remove(pivotDir) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go new file mode 100644 index 00000000000..c316481adfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/ptmx.go @@ -0,0 +1,30 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/libcontainer/console" +) + +func SetupPtmx(rootfs, consolePath, mountLabel string) error { + ptmx := filepath.Join(rootfs, "dev/ptmx") + if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.Symlink("pts/ptmx", ptmx); err != nil { + return fmt.Errorf("symlink dev ptmx %s", err) + } + + if consolePath != "" { + if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go new file 
mode 100644 index 00000000000..9b4a6f704c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/readonly.go @@ -0,0 +1,11 @@ +// +build linux + +package mount + +import ( + "syscall" +) + +func SetReadonly() error { + return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go new file mode 100644 index 00000000000..99a01209d1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/mount/remount.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import "syscall" + +func RemountProc() error { + if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil { + return err + } + + if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { + return err + } + + return nil +} + +func RemountSys() error { + if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil { + if err != syscall.EINVAL { + return err + } + } else { + if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go new file mode 100644 index 00000000000..b6418b6e9f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/create.go @@ -0,0 +1,10 @@ +package namespaces + +import ( + "os" + "os/exec" + + "github.com/docker/libcontainer" +) + +type CreateCommand func(container *libcontainer.Config, console, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go new file mode 100644 index 00000000000..b7873edd0ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/exec.go @@ -0,0 +1,175 @@ +// +build linux + +package namespaces + +import ( + "encoding/json" + "io" + "os" + "os/exec" + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + "github.com/docker/libcontainer/network" + "github.com/docker/libcontainer/system" +) + +// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work. +// Move this to libcontainer package. +// Exec performs setup outside of a namespace so that a container can be +// executed. Exec is a high level function for working with container namespaces. 
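The integration helpers earlier in this diff (runContainer, startLongRunningContainer) drive this entry point; condensed into an illustrative sketch, a caller looks roughly like this, assuming config was built along the lines of the template above:

    package example

    import (
        "os"

        "github.com/docker/libcontainer"
        "github.com/docker/libcontainer/namespaces"
    )

    // run starts args inside a new container described by config and returns the exit code.
    func run(config *libcontainer.Config, args []string) (int, error) {
        return namespaces.Exec(config,
            os.Stdin, os.Stdout, os.Stderr,
            "",            // console: empty means stdio is wired directly to the caller
            config.RootFs, // dataPath: the tests reuse the rootfs as the state directory
            args,
            namespaces.DefaultCreateCommand,
            nil) // no startCallback
    }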
+func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { + var err error + + // create a pipe so that we can syncronize with the namespaced process and + // pass the state and configuration to the child process + parent, child, err := newInitPipe() + if err != nil { + return -1, err + } + defer parent.Close() + + command := createCommand(container, console, dataPath, os.Args[0], child, args) + // Note: these are only used in non-tty mode + // if there is a tty for the container it will be opened within the namespace and the + // fds will be duped to stdin, stdiout, and stderr + command.Stdin = stdin + command.Stdout = stdout + command.Stderr = stderr + + if err := command.Start(); err != nil { + child.Close() + return -1, err + } + child.Close() + + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + command.Process.Kill() + command.Wait() + return -1, terr + } + + started, err := system.GetProcessStartTime(command.Process.Pid) + if err != nil { + return terminate(err) + } + + // Do this before syncing with child so that no children + // can escape the cgroup + cgroupPaths, err := SetupCgroups(container, command.Process.Pid) + if err != nil { + return terminate(err) + } + defer cgroups.RemovePaths(cgroupPaths) + + var networkState network.NetworkState + if err := InitializeNetworking(container, command.Process.Pid, &networkState); err != nil { + return terminate(err) + } + // send the state to the container's init process then shutdown writes for the parent + if err := json.NewEncoder(parent).Encode(networkState); err != nil { + return terminate(err) + } + // shutdown writes for the parent side of the pipe + if err := syscall.Shutdown(int(parent.Fd()), syscall.SHUT_WR); err != nil { + return terminate(err) + } + + state := &libcontainer.State{ + InitPid: command.Process.Pid, + InitStartTime: started, + NetworkState: networkState, + CgroupPaths: cgroupPaths, + } + + if err := libcontainer.SaveState(dataPath, state); err != nil { + return terminate(err) + } + defer libcontainer.DeleteState(dataPath) + + // wait for the child process to fully complete and receive an error message + // if one was encoutered + var ierr *initError + if err := json.NewDecoder(parent).Decode(&ierr); err != nil && err != io.EOF { + return terminate(err) + } + if ierr != nil { + return terminate(ierr) + } + + if startCallback != nil { + startCallback() + } + + if err := command.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +// +// console: the /dev/console to setup inside the container +// init: the program executed inside the namespaces +// root: the path to the container json file and information +// pipe: sync pipe to synchronize the parent and child processes +// args: the arguments to pass to the container to run as the user's program +func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + // get our binary name from arg0 so we can always reexec ourself + env := []string{ + "console=" + console, + "pipe=3", + "data_path=" + dataPath, + 
} + + command := exec.Command(init, append([]string{"init", "--"}, args...)...) + // make sure the process is executed inside the context of the rootfs + command.Dir = container.RootFs + command.Env = append(os.Environ(), env...) + + if command.SysProcAttr == nil { + command.SysProcAttr = &syscall.SysProcAttr{} + } + command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces)) + + command.SysProcAttr.Pdeathsig = syscall.SIGKILL + command.ExtraFiles = []*os.File{pipe} + + return command +} + +// SetupCgroups applies the cgroup restrictions to the process running in the container based +// on the container's configuration +func SetupCgroups(container *libcontainer.Config, nspid int) (map[string]string, error) { + if container.Cgroups != nil { + c := container.Cgroups + if systemd.UseSystemd() { + return systemd.Apply(c, nspid) + } + return fs.Apply(c, nspid) + } + return map[string]string{}, nil +} + +// InitializeNetworking creates the container's network stack outside of the namespace and moves +// interfaces into the container's net namespaces if necessary +func InitializeNetworking(container *libcontainer.Config, nspid int, networkState *network.NetworkState) error { + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go new file mode 100644 index 00000000000..7ce82c81bb7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/execin.go @@ -0,0 +1,127 @@ +// +build linux + +package namespaces + +import ( + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/system" +) + +// ExecIn reexec's the initPath with the argv 0 rewrite to "nsenter" so that it is able to run the +// setns code in a single threaded environment joining the existing containers' namespaces. 
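ExecIn is exercised by the execin tests earlier in this diff; in condensed, illustrative form a caller joins an already-running container roughly like this (statePath is whatever data path the container was started with):

    package example

    import (
        "os"

        "github.com/docker/libcontainer"
        "github.com/docker/libcontainer/namespaces"
    )

    // ps runs "ps" inside the running container whose state lives at statePath.
    func ps(config *libcontainer.Config, statePath string) (int, error) {
        state, err := libcontainer.GetState(statePath)
        if err != nil {
            return -1, err
        }
        return namespaces.ExecIn(config, state, []string{"ps"},
            os.Args[0], "exec", // re-exec ourselves; argv[0] becomes "nsenter-exec"
            os.Stdin, os.Stdout, os.Stderr,
            "",  // console
            nil) // startCallback
    }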
+func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs []string, initPath, action string, + stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) { + + args := []string{fmt.Sprintf("nsenter-%s", action), "--nspid", strconv.Itoa(state.InitPid)} + + if console != "" { + args = append(args, "--console", console) + } + + cmd := &exec.Cmd{ + Path: initPath, + Args: append(args, append([]string{"--"}, userArgs...)...), + } + + if filepath.Base(initPath) == initPath { + if lp, err := exec.LookPath(initPath); err == nil { + cmd.Path = lp + } + } + + parent, child, err := newInitPipe() + if err != nil { + return -1, err + } + defer parent.Close() + + // Note: these are only used in non-tty mode + // if there is a tty for the container it will be opened within the namespace and the + // fds will be duped to stdin, stdiout, and stderr + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + cmd.ExtraFiles = []*os.File{child} + + if err := cmd.Start(); err != nil { + child.Close() + return -1, err + } + child.Close() + + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + cmd.Process.Kill() + cmd.Wait() + return -1, terr + } + + // Enter cgroups. + if err := EnterCgroups(state, cmd.Process.Pid); err != nil { + return terminate(err) + } + + if err := json.NewEncoder(parent).Encode(container); err != nil { + return terminate(err) + } + + if startCallback != nil { + startCallback(cmd) + } + + if err := cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// Finalize expects that the setns calls have been setup and that is has joined an +// existing namespace +func FinalizeSetns(container *libcontainer.Config, args []string) error { + // clear the current processes env and replace it with the environment defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + + if err := setupRlimits(container); err != nil { + return fmt.Errorf("setup rlimits %s", err) + } + + if err := FinalizeNamespace(container); err != nil { + return err + } + + if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil { + return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err) + } + + if container.ProcessLabel != "" { + if err := label.SetProcessLabel(container.ProcessLabel); err != nil { + return err + } + } + + if err := system.Execv(args[0], args[0:], os.Environ()); err != nil { + return err + } + + panic("unreachable") +} + +func EnterCgroups(state *libcontainer.State, pid int) error { + return cgroups.EnterPid(state.CgroupPaths, pid) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go new file mode 100644 index 00000000000..a4400bddbcb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/init.go @@ -0,0 +1,329 @@ +// +build linux + +package namespaces + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/netlink" + "github.com/docker/libcontainer/network" + 
"github.com/docker/libcontainer/security/capabilities" + "github.com/docker/libcontainer/security/restrict" + "github.com/docker/libcontainer/system" + "github.com/docker/libcontainer/user" + "github.com/docker/libcontainer/utils" +) + +// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work. +// Move this to libcontainer package. +// Init is the init process that first runs inside a new namespace to setup mounts, users, networking, +// and other options required for the new container. +// The caller of Init function has to ensure that the go runtime is locked to an OS thread +// (using runtime.LockOSThread) else system calls like setns called within Init may not work as intended. +func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, pipe *os.File, args []string) (err error) { + defer func() { + // if we have an error during the initialization of the container's init then send it back to the + // parent process in the form of an initError. + if err != nil { + // ensure that any data sent from the parent is consumed so it doesn't + // receive ECONNRESET when the child writes to the pipe. + ioutil.ReadAll(pipe) + if err := json.NewEncoder(pipe).Encode(initError{ + Message: err.Error(), + }); err != nil { + panic(err) + } + } + // ensure that this pipe is always closed + pipe.Close() + }() + + rootfs, err := utils.ResolveRootfs(uncleanRootfs) + if err != nil { + return err + } + + // clear the current processes env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + + // We always read this as it is a way to sync with the parent as well + var networkState *network.NetworkState + if err := json.NewDecoder(pipe).Decode(&networkState); err != nil { + return err + } + // join any namespaces via a path to the namespace fd if provided + if err := joinExistingNamespaces(container.Namespaces); err != nil { + return err + } + if consolePath != "" { + if err := console.OpenAndDup(consolePath); err != nil { + return err + } + } + if _, err := syscall.Setsid(); err != nil { + return fmt.Errorf("setsid %s", err) + } + if consolePath != "" { + if err := system.Setctty(); err != nil { + return fmt.Errorf("setctty %s", err) + } + } + + if err := setupNetwork(container, networkState); err != nil { + return fmt.Errorf("setup networking %s", err) + } + if err := setupRoute(container); err != nil { + return fmt.Errorf("setup route %s", err) + } + + if err := setupRlimits(container); err != nil { + return fmt.Errorf("setup rlimits %s", err) + } + + label.Init() + + if err := mount.InitializeMountNamespace(rootfs, + consolePath, + container.RestrictSys, + (*mount.MountConfig)(container.MountConfig)); err != nil { + return fmt.Errorf("setup mount namespace %s", err) + } + + if container.Hostname != "" { + if err := syscall.Sethostname([]byte(container.Hostname)); err != nil { + return fmt.Errorf("unable to sethostname %q: %s", container.Hostname, err) + } + } + + if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil { + return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err) + } + + if err := label.SetProcessLabel(container.ProcessLabel); err != nil { + return fmt.Errorf("set process label %s", err) + } + + // TODO: (crosbymichael) make this configurable at the Config level + if container.RestrictSys { + if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil { + 
return err + } + } + + pdeathSignal, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if err := FinalizeNamespace(container); err != nil { + return fmt.Errorf("finalize namespace %s", err) + } + + // FinalizeNamespace can change user/group which clears the parent death + // signal, so we restore it here. + if err := RestoreParentDeathSignal(pdeathSignal); err != nil { + return fmt.Errorf("restore parent death signal %s", err) + } + + return system.Execv(args[0], args[0:], os.Environ()) +} + +// RestoreParentDeathSignal sets the parent death signal to old. +func RestoreParentDeathSignal(old int) error { + if old == 0 { + return nil + } + + current, err := system.GetParentDeathSignal() + if err != nil { + return fmt.Errorf("get parent death signal %s", err) + } + + if old == current { + return nil + } + + if err := system.ParentDeathSignal(uintptr(old)); err != nil { + return fmt.Errorf("set parent death signal %s", err) + } + + // Signal self if parent is already dead. Does nothing if running in a new + // PID namespace, as Getppid will always return 0. + if syscall.Getppid() == 1 { + return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) + } + + return nil +} + +// SetupUser changes the groups, gid, and uid for the user inside the container +func SetupUser(u string) error { + // Set up defaults. + defaultExecUser := user.ExecUser{ + Uid: syscall.Getuid(), + Gid: syscall.Getgid(), + Home: "/", + } + + passwdPath, err := user.GetPasswdPath() + if err != nil { + return err + } + + groupPath, err := user.GetGroupPath() + if err != nil { + return err + } + + execUser, err := user.GetExecUserPath(u, &defaultExecUser, passwdPath, groupPath) + if err != nil { + return fmt.Errorf("get supplementary groups %s", err) + } + + if err := syscall.Setgroups(execUser.Sgids); err != nil { + return fmt.Errorf("setgroups %s", err) + } + + if err := system.Setgid(execUser.Gid); err != nil { + return fmt.Errorf("setgid %s", err) + } + + if err := system.Setuid(execUser.Uid); err != nil { + return fmt.Errorf("setuid %s", err) + } + + // if we didn't get HOME already, set it based on the user's HOME + if envHome := os.Getenv("HOME"); envHome == "" { + if err := os.Setenv("HOME", execUser.Home); err != nil { + return fmt.Errorf("set HOME %s", err) + } + } + + return nil +} + +// setupVethNetwork uses the Network config if it is not nil to initialize +// the new veth interface inside the container for use by changing the name to eth0 +// setting the MTU and IP address along with the default gateway +func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error { + for _, config := range container.Networks { + strategy, err := network.GetStrategy(config.Type) + if err != nil { + return err + } + + err1 := strategy.Initialize((*network.Network)(config), networkState) + if err1 != nil { + return err1 + } + } + return nil +} + +func setupRoute(container *libcontainer.Config) error { + for _, config := range container.Routes { + if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil { + return err + } + } + return nil +} + +func setupRlimits(container *libcontainer.Config) error { + for _, rlimit := range container.Rlimits { + l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft} + if err := syscall.Setrlimit(rlimit.Type, l); err != nil { + return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) + } + } + return nil +} + +// FinalizeNamespace drops 
the caps, sets the correct user +// and working dir, and closes any leaky file descriptors +// before execing the command inside the namespace +func FinalizeNamespace(container *libcontainer.Config) error { + // Ensure that all non-standard fds we may have accidentally + // inherited are marked close-on-exec so they stay out of the + // container + if err := utils.CloseExecFrom(3); err != nil { + return fmt.Errorf("close open file descriptors %s", err) + } + + // drop capabilities in bounding set before changing user + if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { + return fmt.Errorf("drop bounding set %s", err) + } + + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return fmt.Errorf("set keep caps %s", err) + } + + if err := SetupUser(container.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + + if err := system.ClearKeepCaps(); err != nil { + return fmt.Errorf("clear keep caps %s", err) + } + + // drop all other capabilities + if err := capabilities.DropCapabilities(container.Capabilities); err != nil { + return fmt.Errorf("drop capabilities %s", err) + } + + if container.WorkingDir != "" { + if err := syscall.Chdir(container.WorkingDir); err != nil { + return fmt.Errorf("chdir to %s %s", container.WorkingDir, err) + } + } + + return nil +} + +func LoadContainerEnvironment(container *libcontainer.Config) error { + os.Clearenv() + for _, pair := range container.Env { + p := strings.SplitN(pair, "=", 2) + if len(p) < 2 { + return fmt.Errorf("invalid environment '%v'", pair) + } + if err := os.Setenv(p[0], p[1]); err != nil { + return err + } + } + return nil +} + +// joinExistingNamespaces gets all the namespace paths specified for the container and +// does a setns on the namespace fd so that the current process joins the namespace. +func joinExistingNamespaces(namespaces []libcontainer.Namespace) error { + for _, ns := range namespaces { + if ns.Path != "" { + f, err := os.OpenFile(ns.Path, os.O_RDONLY, 0) + if err != nil { + return err + } + err = system.Setns(f.Fd(), uintptr(namespaceInfo[ns.Type])) + f.Close() + if err != nil { + return err + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/README.md new file mode 100644 index 00000000000..ac94cba0591 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/README.md @@ -0,0 +1,6 @@ +## nsenter + +The `nsenter` package registers a special init constructor that is called before the Go runtime has +a chance to boot. This provides us the ability to `setns` on existing namespaces and avoid the issues +that the Go runtime has with multiple threads. This constructor is only called if this package is +registered, imported, in your go application and the argv 0 is `nsenter`. 
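The README above states when the nsenter constructor fires (the package is linked in and argv[0] is "nsenter") but not how a caller usually arranges those conditions. The following Go sketch is illustrative only and is not part of the vendored code: the blank import, the argv[0] convention, and the `--nspid` flag are taken from the files in this diff, while the helper name `reexecIntoNamespaces`, the flag wiring, and the `main` scaffolding are assumptions made for the example.

```go
// Illustrative sketch: re-exec the current binary with argv[0] set to
// "nsenter" so the cgo constructor can setns into the target container's
// namespaces before the Go runtime starts any threads.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"

	// Blank import links in the C constructor; it does nothing unless
	// argv[0] begins with "nsenter".
	_ "github.com/docker/libcontainer/namespaces/nsenter"
)

// reexecIntoNamespaces re-executes /proc/self/exe with argv[0] overridden to
// "nsenter" and passes the init PID via --nspid, mirroring the usage string
// printed by nsenter.c. The helper name and overall wiring are assumptions
// for illustration, not the library's public API.
func reexecIntoNamespaces(initPid int, command ...string) error {
	args := append([]string{"--nspid", strconv.Itoa(initPid), "--"}, command...)
	cmd := exec.Command("/proc/self/exe", args...)
	cmd.Args[0] = "nsenter" // the C constructor keys off argv[0]
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "usage: example <init-pid> <cmd> [args...]")
		os.Exit(1)
	}
	pid, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "invalid pid:", err)
		os.Exit(1)
	}
	if err := reexecIntoNamespaces(pid, os.Args[2:]...); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Re-execing /proc/self/exe keeps everything in a single binary while still letting the constructor join the namespaces before any Go threads exist, which is exactly the multi-threading problem the README says this package works around.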
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c new file mode 100644 index 00000000000..b735b1fa20b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c @@ -0,0 +1,227 @@ +// +build cgo +// +// formated with indent -linux nsenter.c + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const kBufSize = 256; +static const char *kNsEnter = "nsenter"; + +void get_args(int *argc, char ***argv) +{ + // Read argv + int fd = open("/proc/self/cmdline", O_RDONLY); + + // Read the whole commandline. + ssize_t contents_size = 0; + ssize_t contents_offset = 0; + char *contents = NULL; + ssize_t bytes_read = 0; + do { + contents_size += kBufSize; + contents = (char *)realloc(contents, contents_size); + bytes_read = + read(fd, contents + contents_offset, + contents_size - contents_offset); + contents_offset += bytes_read; + } + while (bytes_read > 0); + close(fd); + + // Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args. + ssize_t i; + *argc = 0; + for (i = 0; i < contents_offset; i++) { + if (contents[i] == '\0') { + (*argc)++; + } + } + *argv = (char **)malloc(sizeof(char *) * ((*argc) + 1)); + int idx; + for (idx = 0; idx < (*argc); idx++) { + (*argv)[idx] = contents; + contents += strlen(contents) + 1; + } + (*argv)[*argc] = NULL; +} + +// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12) +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14 +#define _GNU_SOURCE +#include +#include "syscall.h" +#ifdef SYS_setns +int setns(int fd, int nstype) +{ + return syscall(SYS_setns, fd, nstype); +} +#endif +#endif + +void print_usage() +{ + fprintf(stderr, + "nsenter --nspid --console -- cmd1 arg1 arg2...\n"); +} + +void nsenter() +{ + int argc, c; + char **argv; + get_args(&argc, &argv); + + // check argv 0 to ensure that we are supposed to setns + // we use strncmp to test for a value of "nsenter" but also allows alternate implmentations + // after the setns code path to continue to use the argv 0 to determine actions to be run + // resulting in the ability to specify "nsenter-mknod", "nsenter-exec", etc... + if (strncmp(argv[0], kNsEnter, strlen(kNsEnter)) != 0) { + return; + } + + #ifdef PR_SET_CHILD_SUBREAPER + if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) == -1) { + fprintf(stderr, "nsenter: failed to set child subreaper: %s", + strerror(errno)); + exit(1); + } + #endif + + static const struct option longopts[] = { + {"nspid", required_argument, NULL, 'n'}, + {"console", required_argument, NULL, 't'}, + {NULL, 0, NULL, 0} + }; + + pid_t init_pid = -1; + char *init_pid_str = NULL; + char *console = NULL; + while ((c = getopt_long_only(argc, argv, "n:c:", longopts, NULL)) != -1) { + switch (c) { + case 'n': + init_pid_str = optarg; + break; + case 't': + console = optarg; + break; + } + } + + if (init_pid_str == NULL) { + print_usage(); + exit(1); + } + + init_pid = strtol(init_pid_str, NULL, 10); + if ((init_pid == 0 && errno == EINVAL) || errno == ERANGE) { + fprintf(stderr, + "nsenter: Failed to parse PID from \"%s\" with output \"%d\" and error: \"%s\"\n", + init_pid_str, init_pid, strerror(errno)); + print_usage(); + exit(1); + } + + argc -= 3; + argv += 3; + + if (setsid() == -1) { + fprintf(stderr, "setsid failed. 
Error: %s\n", strerror(errno)); + exit(1); + } + // before we setns we need to dup the console + int consolefd = -1; + if (console != NULL) { + consolefd = open(console, O_RDWR); + if (consolefd < 0) { + fprintf(stderr, + "nsenter: failed to open console %s %s\n", + console, strerror(errno)); + exit(1); + } + } + // Setns on all supported namespaces. + char ns_dir[PATH_MAX]; + memset(ns_dir, 0, PATH_MAX); + snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid); + + char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt" }; + const int num = sizeof(namespaces) / sizeof(char *); + int i; + for (i = 0; i < num; i++) { + char buf[PATH_MAX]; + memset(buf, 0, PATH_MAX); + snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, namespaces[i]); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + // Ignore nonexistent namespaces. + if (errno == ENOENT) + continue; + + fprintf(stderr, + "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", + buf, namespaces[i], strerror(errno)); + exit(1); + } + // Set the namespace. + if (setns(fd, 0) == -1) { + fprintf(stderr, + "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", + namespaces[i], strerror(errno)); + exit(1); + } + close(fd); + } + + // We must fork to actually enter the PID namespace. + int child = fork(); + if (child == 0) { + if (consolefd != -1) { + if (dup2(consolefd, STDIN_FILENO) != 0) { + fprintf(stderr, "nsenter: failed to dup 0 %s\n", + strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) { + fprintf(stderr, "nsenter: failed to dup 1 %s\n", + strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) { + fprintf(stderr, "nsenter: failed to dup 2 %s\n", + strerror(errno)); + exit(1); + } + } + // Finish executing, let the Go runtime take over. + return; + } else { + // Parent, wait for the child. + int status = 0; + if (waitpid(child, &status, 0) == -1) { + fprintf(stderr, + "nsenter: Failed to waitpid with error: \"%s\"\n", + strerror(errno)); + exit(1); + } + // Forward the child's exit code or re-send its death signal. 
+ if (WIFEXITED(status)) { + exit(WEXITSTATUS(status)); + } else if (WIFSIGNALED(status)) { + kill(getpid(), WTERMSIG(status)); + } + + exit(1); + } + + return; +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go new file mode 100644 index 00000000000..7d21e8e59fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go @@ -0,0 +1,10 @@ +// +build linux + +package nsenter + +/* +__attribute__((constructor)) init() { + nsenter(); +} +*/ +import "C" diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go new file mode 100644 index 00000000000..2459c6367e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package nsenter diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/utils.go new file mode 100644 index 00000000000..de71a379f85 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/namespaces/utils.go @@ -0,0 +1,45 @@ +// +build linux + +package namespaces + +import ( + "os" + "syscall" + + "github.com/docker/libcontainer" +) + +type initError struct { + Message string `json:"message,omitempty"` +} + +func (i initError) Error() string { + return i.Message +} + +var namespaceInfo = map[libcontainer.NamespaceType]int{ + libcontainer.NEWNET: syscall.CLONE_NEWNET, + libcontainer.NEWNS: syscall.CLONE_NEWNS, + libcontainer.NEWUSER: syscall.CLONE_NEWUSER, + libcontainer.NEWIPC: syscall.CLONE_NEWIPC, + libcontainer.NEWUTS: syscall.CLONE_NEWUTS, + libcontainer.NEWPID: syscall.CLONE_NEWPID, +} + +// New returns a newly initialized Pipe for communication between processes +func newInitPipe() (parent *os.File, child *os.File, err error) { + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil +} + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { + for _, v := range namespaces { + flag |= namespaceInfo[v.Type] + } + return flag +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS new file mode 100644 index 00000000000..1cb551364dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. Charmes (@creack) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go new file mode 100644 index 00000000000..90883660612 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink.go @@ -0,0 +1,31 @@ +// Packet netlink provide access to low level Netlink sockets and messages. 
+// +// Actual implementations are in: +// netlink_linux.go +// netlink_darwin.go +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrWrongSockType = errors.New("Wrong socket type") + ErrShortResponse = errors.New("Got short response from netlink") + ErrInterfaceExists = errors.New("Network interface already exists") +) + +// A Route is a subnet associated with the interface to reach it. +type Route struct { + *net.IPNet + Iface *net.Interface + Default bool +} + +// An IfAddr defines IP network settings for a given network interface +type IfAddr struct { + Iface *net.Interface + IP net.IP + IPNet *net.IPNet +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go new file mode 100644 index 00000000000..3cc3cc94f74 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -0,0 +1,1299 @@ +package netlink + +import ( + "encoding/binary" + "fmt" + "io" + "math/rand" + "net" + "os" + "path/filepath" + "sync/atomic" + "syscall" + "unsafe" +) + +const ( + IFNAMSIZ = 16 + DEFAULT_CHANGE = 0xFFFFFFFF + IFLA_INFO_KIND = 1 + IFLA_INFO_DATA = 2 + VETH_INFO_PEER = 1 + IFLA_MACVLAN_MODE = 1 + IFLA_VLAN_ID = 1 + IFLA_NET_NS_FD = 28 + IFLA_ADDRESS = 1 + SIOC_BRADDBR = 0x89a0 + SIOC_BRDELBR = 0x89a1 + SIOC_BRADDIF = 0x89a2 +) + +const ( + MACVLAN_MODE_PRIVATE = 1 << iota + MACVLAN_MODE_VEPA + MACVLAN_MODE_BRIDGE + MACVLAN_MODE_PASSTHRU +) + +var nextSeqNr uint32 + +type ifreqHwaddr struct { + IfrnName [IFNAMSIZ]byte + IfruHwaddr syscall.RawSockaddr +} + +type ifreqIndex struct { + IfrnName [IFNAMSIZ]byte + IfruIndex int32 +} + +type ifreqFlags struct { + IfrnName [IFNAMSIZ]byte + Ifruflags uint16 +} + +var native binary.ByteOrder + +func init() { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + native = binary.BigEndian + } else { + native = binary.LittleEndian + } +} + +func getIpFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return syscall.AF_INET + } + if ip.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +type NetlinkRequestData interface { + Len() int + ToWireFormat() []byte +} + +type IfInfomsg struct { + syscall.IfInfomsg +} + +func newIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := newIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +func (msg *IfInfomsg) ToWireFormat() []byte { + length := syscall.SizeofIfInfomsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = 0 + native.PutUint16(b[2:4], msg.Type) + native.PutUint32(b[4:8], uint32(msg.Index)) + native.PutUint32(b[8:12], msg.Flags) + native.PutUint32(b[12:16], msg.Change) + return b +} + +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + +type IfAddrmsg struct { + syscall.IfAddrmsg +} + +func newIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +func (msg *IfAddrmsg) ToWireFormat() []byte { + length := syscall.SizeofIfAddrmsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Prefixlen + b[2] = msg.Flags + b[3] = msg.Scope + native.PutUint32(b[4:8], msg.Index) + return b +} + +func (msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} + +type RtMsg struct { + syscall.RtMsg +} + +func newRtMsg() *RtMsg { 
+ return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } +} + +func (msg *RtMsg) ToWireFormat() []byte { + length := syscall.SizeofRtMsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Dst_len + b[2] = msg.Src_len + b[3] = msg.Tos + b[4] = msg.Table + b[5] = msg.Protocol + b[6] = msg.Scope + b[7] = msg.Type + native.PutUint32(b[8:12], msg.Flags) + return b +} + +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) +} + +type RtAttr struct { + syscall.RtAttr + Data []byte + children []NetlinkRequestData +} + +func newRtAttr(attrType int, data []byte) *RtAttr { + return &RtAttr{ + RtAttr: syscall.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} + +func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + attr := newRtAttr(attrType, data) + parent.children = append(parent.children, attr) + return attr +} + +func (a *RtAttr) Len() int { + if len(a.children) == 0 { + return (syscall.SizeofRtAttr + len(a.Data)) + } + + l := 0 + for _, child := range a.children { + l += child.Len() + } + l += syscall.SizeofRtAttr + return rtaAlignOf(l + len(a.Data)) +} + +func (a *RtAttr) ToWireFormat() []byte { + length := a.Len() + buf := make([]byte, rtaAlignOf(length)) + + if a.Data != nil { + copy(buf[4:], a.Data) + } else { + next := 4 + for _, child := range a.children { + childBuf := child.ToWireFormat() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + + if l := uint16(length); l != 0 { + native.PutUint16(buf[0:2], l) + } + native.PutUint16(buf[2:4], a.Type) + return buf +} + +func uint32Attr(t int, n uint32) *RtAttr { + buf := make([]byte, 4) + native.PutUint32(buf, n) + return newRtAttr(t, buf) +} + +type NetlinkRequest struct { + syscall.NlMsghdr + Data []NetlinkRequestData +} + +func (rr *NetlinkRequest) ToWireFormat() []byte { + length := rr.Len + dataBytes := make([][]byte, len(rr.Data)) + for i, data := range rr.Data { + dataBytes[i] = data.ToWireFormat() + length += uint32(len(dataBytes[i])) + } + b := make([]byte, length) + native.PutUint32(b[0:4], length) + native.PutUint16(b[4:6], rr.Type) + native.PutUint16(b[6:8], rr.Flags) + native.PutUint32(b[8:12], rr.Seq) + native.PutUint32(b[12:16], rr.Pid) + + next := 16 + for _, data := range dataBytes { + copy(b[next:], data) + next += len(data) + } + return b +} + +func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { + if data != nil { + rr.Data = append(rr.Data, data) + } +} + +func newNetlinkRequest(proto, flags int) *NetlinkRequest { + return &NetlinkRequest{ + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.NLMSG_HDRLEN), + Type: uint16(proto), + Flags: syscall.NLM_F_REQUEST | uint16(flags), + Seq: atomic.AddUint32(&nextSeqNr, 1), + }, + } +} + +type NetlinkSocket struct { + fd int + lsa syscall.SockaddrNetlink +} + +func getNetlinkSocket() (*NetlinkSocket, error) { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: fd, + } + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) + return nil, err + } + + return s, nil +} + +func (s *NetlinkSocket) Close() { + syscall.Close(s.fd) +} + +func (s *NetlinkSocket) Send(request *NetlinkRequest) 
error { + if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { + return err + } + return nil +} + +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(s.fd, rb, 0) + if err != nil { + return nil, err + } + if nr < syscall.NLMSG_HDRLEN { + return nil, ErrShortResponse + } + rb = rb[:nr] + return syscall.ParseNetlinkMessage(rb) +} + +func (s *NetlinkSocket) GetPid() (uint32, error) { + lsa, err := syscall.Getsockname(s.fd) + if err != nil { + return 0, err + } + switch v := lsa.(type) { + case *syscall.SockaddrNetlink: + return v.Pid, nil + } + return 0, ErrWrongSockType +} + +func (s *NetlinkSocket) CheckMessage(m syscall.NetlinkMessage, seq, pid uint32) error { + if m.Header.Seq != seq { + return fmt.Errorf("netlink: invalid seq %d, expected %d", m.Header.Seq, seq) + } + if m.Header.Pid != pid { + return fmt.Errorf("netlink: wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + return io.EOF + } + if m.Header.Type == syscall.NLMSG_ERROR { + e := int32(native.Uint32(m.Data[0:4])) + if e == 0 { + return io.EOF + } + return syscall.Errno(-e) + } + return nil +} + +func (s *NetlinkSocket) HandleAck(seq uint32) error { + pid, err := s.GetPid() + if err != nil { + return err + } + +outer: + for { + msgs, err := s.Receive() + if err != nil { + return err + } + for _, m := range msgs { + if err := s.CheckMessage(m, seq, pid); err != nil { + if err == io.EOF { + break outer + } + return err + } + } + } + + return nil +} + +func zeroTerminated(s string) []byte { + return []byte(s + "\000") +} + +func nonZeroTerminated(s string) []byte { + return []byte(s) +} + +// Add a new network link of a specified type. +// This is identical to running: ip link add $name type $linkType +func NetworkLinkAdd(name string, linkType string) error { + if name == "" || linkType == "" { + return fmt.Errorf("Neither link name nor link type can be empty!") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType)) + wb.AddData(linkInfo) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) + wb.AddData(nameData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Delete a network link. +// This is identical to running: ip link del $name +func NetworkLinkDel(name string) error { + if name == "" { + return fmt.Errorf("Network link name can not be empty!") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + + wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Bring up a particular network interface. 
+// This is identical to running: ip link set dev $name up +func NetworkLinkUp(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Flags = syscall.IFF_UP + msg.Change = syscall.IFF_UP + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Bring down a particular network interface. +// This is identical to running: ip link set $name down +func NetworkLinkDown(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Flags = 0 & ^syscall.IFF_UP + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Set link layer address ie. MAC Address. +// This is identical to running: ip link set dev $name address $macaddress +func NetworkSetMacAddress(iface *net.Interface, macaddr string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + hwaddr, err := net.ParseMAC(macaddr) + if err != nil { + return err + } + + var ( + MULTICAST byte = 0x1 + ) + + if hwaddr[0]&0x1 == MULTICAST { + return fmt.Errorf("Multicast MAC Address is not supported: %s", macaddr) + } + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + macdata := make([]byte, 6) + copy(macdata, hwaddr) + data := newRtAttr(IFLA_ADDRESS, macdata) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Set link Maximum Transmission Unit +// This is identical to running: ip link set dev $name mtu $MTU +// bridge is a bitch here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=292088 +// https://bugzilla.redhat.com/show_bug.cgi?id=697021 +// There is a discussion about how to deal with ifcs joining bridge with MTU > 1500 +// Regular network nterfaces do seem to work though! 
+func NetworkSetMTU(iface *net.Interface, mtu int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(uint32Attr(syscall.IFLA_MTU, uint32(mtu))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Set link queue length +// This is identical to running: ip link set dev $name txqueuelen $QLEN +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(uint32Attr(syscall.IFLA_TXQLEN, uint32(txQueueLen))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(rtattr) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Add an interface to bridge. +// This is identical to running: ip link set $name master $master +func NetworkSetMaster(iface, master *net.Interface) error { + data := uint32Attr(syscall.IFLA_MASTER, uint32(master.Index)) + return networkMasterAction(iface, data) +} + +// Remove an interface from the bridge +// This is is identical to to running: ip link $name set nomaster +func NetworkSetNoMaster(iface *net.Interface) error { + data := uint32Attr(syscall.IFLA_MASTER, 0) + return networkMasterAction(iface, data) +} + +func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + wb.AddData(msg) + wb.AddData(rtattr) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Move a particular network interface to a particular network namespace +// specified by PID. This is idential to running: ip link set dev $name netns $pid +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid)) + return networkSetNsAction(iface, data) +} + +// Move a particular network interface to a particular mounted +// network namespace specified by file descriptor. +// This is idential to running: ip link set dev $name netns $fd +func NetworkSetNsFd(iface *net.Interface, fd int) error { + data := uint32Attr(IFLA_NET_NS_FD, uint32(fd)) + return networkSetNsAction(iface, data) +} + +// Rname a particular interface to a different name +// !!! Note that you can't rename an active interface. You need to bring it down before renaming it. 
+// This is identical to running: ip link set dev ${oldName} name ${newName} +func NetworkChangeName(iface *net.Interface, newName string) error { + if len(newName) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", newName) + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(newName)) + wb.AddData(nameData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Add a new VETH pair link on the host +// This is identical to running: ip link add name $name type veth peer name $peername +func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) + wb.AddData(nameData) + + txqLen := make([]byte, 4) + native.PutUint32(txqLen, uint32(txQueueLen)) + txqData := newRtAttr(syscall.IFLA_TXQLEN, txqLen) + wb.AddData(txqData) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) + + newIfInfomsgChild(nest3, syscall.AF_UNSPEC) + newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + + txqLen2 := make([]byte, 4) + native.PutUint32(txqLen2, uint32(txQueueLen)) + newRtAttrChild(nest3, syscall.IFLA_TXQLEN, txqLen2) + + wb.AddData(nest1) + + if err := s.Send(wb); err != nil { + return err + } + + if err := s.HandleAck(wb.Seq); err != nil { + if os.IsExist(err) { + return ErrInterfaceExists + } + + return err + } + + return nil +} + +// Add a new VLAN interface with masterDev as its upper device +// This is identical to running: +// ip link add name $name link $masterdev type vlan id $id +func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + masterDevIfc, err := net.InterfaceByName(masterDev) + if err != nil { + return err + } + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("vlan")) + + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + vlanData := make([]byte, 2) + native.PutUint16(vlanData, vlanId) + newRtAttrChild(nest2, IFLA_VLAN_ID, vlanData) + wb.AddData(nest1) + + wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(vlanDev))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// MacVlan link has LowerDev, UpperDev and operates in Mode mode +// This simplifies the code when creating MacVlan or MacVtap interface +type MacVlanLink struct { + MasterDev string + SlaveDev string + mode string +} + +func (m MacVlanLink) Mode() uint32 { + modeMap := map[string]uint32{ + "private": MACVLAN_MODE_PRIVATE, + "vepa": 
MACVLAN_MODE_VEPA, + "bridge": MACVLAN_MODE_BRIDGE, + "passthru": MACVLAN_MODE_PASSTHRU, + } + + return modeMap[m.mode] +} + +// Add MAC VLAN network interface with masterDev as its upper device +// This is identical to running: +// ip link add name $name link $masterdev type macvlan mode $mode +func networkLinkMacVlan(dev_type string, mcvln *MacVlanLink) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + masterDevIfc, err := net.InterfaceByName(mcvln.MasterDev) + if err != nil { + return err + } + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated(dev_type)) + + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + macVlanData := make([]byte, 4) + native.PutUint32(macVlanData, mcvln.Mode()) + newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) + wb.AddData(nest1) + + wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(mcvln.SlaveDev))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { + return networkLinkMacVlan("macvlan", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + +func NetworkLinkAddMacVtap(masterDev, macVlanDev string, mode string) error { + return networkLinkMacVlan("macvtap", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + +func networkLinkIpAction(action, flags int, ifa IfAddr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + family := getIpFamily(ifa.IP) + + wb := newNetlinkRequest(action, flags) + + msg := newIfAddrmsg(family) + msg.Index = uint32(ifa.Iface.Index) + prefixLen, _ := ifa.IPNet.Mask.Size() + msg.Prefixlen = uint8(prefixLen) + wb.AddData(msg) + + var ipData []byte + if family == syscall.AF_INET { + ipData = ifa.IP.To4() + } else { + ipData = ifa.IP.To16() + } + + localData := newRtAttr(syscall.IFA_LOCAL, ipData) + wb.AddData(localData) + + addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) + wb.AddData(addrData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Delete an IP address from an interface. This is identical to: +// ip addr del $ip/$ipNet dev $iface +func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return networkLinkIpAction( + syscall.RTM_DELADDR, + syscall.NLM_F_ACK, + IfAddr{iface, ip, ipNet}, + ) +} + +// Add an Ip address to an interface. 
This is identical to: +// ip addr add $ip/$ipNet dev $iface +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return networkLinkIpAction( + syscall.RTM_NEWADDR, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK, + IfAddr{iface, ip, ipNet}, + ) +} + +// Returns an array of IPNet for all the currently routed subnets on ipv4 +// This is similar to the first column of "ip route" output +func NetworkGetRoutes() ([]Route, error) { + s, err := getNetlinkSocket() + if err != nil { + return nil, err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return nil, err + } + + pid, err := s.GetPid() + if err != nil { + return nil, err + } + + res := make([]Route, 0) + +outer: + for { + msgs, err := s.Receive() + if err != nil { + return nil, err + } + for _, m := range msgs { + if err := s.CheckMessage(m, wb.Seq, pid); err != nil { + if err == io.EOF { + break outer + } + return nil, err + } + if m.Header.Type != syscall.RTM_NEWROUTE { + continue + } + + var r Route + + msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) + + if msg.Flags&syscall.RTM_F_CLONED != 0 { + // Ignore cloned routes + continue + } + + if msg.Table != syscall.RT_TABLE_MAIN { + // Ignore non-main tables + continue + } + + if msg.Family != syscall.AF_INET { + // Ignore non-ipv4 routes + continue + } + + if msg.Dst_len == 0 { + // Default routes + r.Default = true + } + + attrs, err := syscall.ParseNetlinkRouteAttr(&m) + if err != nil { + return nil, err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_DST: + ip := attr.Value + r.IPNet = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), + } + case syscall.RTA_OIF: + index := int(native.Uint32(attr.Value[0:4])) + r.Iface, _ = net.InterfaceByIndex(index) + } + } + if r.Default || r.IPNet != nil { + res = append(res, r) + } + } + } + + return res, nil +} + +// Add a new route table entry. 
+func AddRoute(destination, source, gateway, device string) error { + if destination == "" && source == "" && gateway == "" { + return fmt.Errorf("one of destination, source or gateway must not be blank") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + msg := newRtMsg() + currentFamily := -1 + var rtAttrs []*RtAttr + + if destination != "" { + destIP, destNet, err := net.ParseCIDR(destination) + if err != nil { + return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) + } + destFamily := getIpFamily(destIP) + currentFamily = destFamily + destLen, bits := destNet.Mask.Size() + if destLen == 0 && bits == 0 { + return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) + } + msg.Family = uint8(destFamily) + msg.Dst_len = uint8(destLen) + var destData []byte + if destFamily == syscall.AF_INET { + destData = destIP.To4() + } else { + destData = destIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) + } + + if source != "" { + srcIP := net.ParseIP(source) + if srcIP == nil { + return fmt.Errorf("source IP %s couldn't be parsed", source) + } + srcFamily := getIpFamily(srcIP) + if currentFamily != -1 && currentFamily != srcFamily { + return fmt.Errorf("source and destination ip were not the same IP family") + } + currentFamily = srcFamily + msg.Family = uint8(srcFamily) + var srcData []byte + if srcFamily == syscall.AF_INET { + srcData = srcIP.To4() + } else { + srcData = srcIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_PREFSRC, srcData)) + } + + if gateway != "" { + gwIP := net.ParseIP(gateway) + if gwIP == nil { + return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) + } + gwFamily := getIpFamily(gwIP) + if currentFamily != -1 && currentFamily != gwFamily { + return fmt.Errorf("gateway, source, and destination ip were not the same IP family") + } + msg.Family = uint8(gwFamily) + var gwData []byte + if gwFamily == syscall.AF_INET { + gwData = gwIP.To4() + } else { + gwData = gwIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + wb.AddData(msg) + for _, attr := range rtAttrs { + wb.AddData(attr) + } + + iface, err := net.InterfaceByName(device) + if err != nil { + return err + } + wb.AddData(uint32Attr(syscall.RTA_OIF, uint32(iface.Index))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Add a new default gateway. Identical to: +// ip route add default via $ip +func AddDefaultGw(ip, device string) error { + return AddRoute("", "", ip, device) +} + +// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE +// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS +// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK +func getIfSocket() (fd int, err error) { + for _, socket := range []int{ + syscall.AF_INET, + syscall.AF_PACKET, + syscall.AF_INET6, + } { + if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { + break + } + } + if err == nil { + return fd, nil + } + return -1, err +} + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. 
+func CreateBridge(name string, setMacAddr bool) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + if setMacAddr { + return SetMacAddress(name, randMacAddr()) + } + return nil +} + +// Delete the actual bridge device. +func DeleteBridge(name string) error { + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + + var ifr ifreqFlags + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name)) + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + return nil +} + +// Add a slave to abridge device. This is more backward-compatible than +// netlink.NetworkSetMaster and works on RHEL 6. +func AddToBridge(iface, master *net.Interface) error { + if len(master.Name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", master.Name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqIndex{} + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) + ifr.IfruIndex = int32(iface.Index) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + return nil +} + +func randMacAddr() string { + hw := make(net.HardwareAddr, 6) + for i := 0; i < 6; i++ { + hw[i] = byte(rand.Intn(255)) + } + hw[0] &^= 0x1 // clear multicast bit + hw[0] |= 0x2 // set local assignment bit (IEEE802) + return hw.String() +} + +func SetMacAddress(name, addr string) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + hw, err := net.ParseMAC(addr) + if err != nil { + return err + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqHwaddr{} + ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) + + for i := 0; i < 6; i++ { + ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i]) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + return nil +} + +func SetHairpinMode(iface *net.Interface, enabled bool) error { + sysPath := filepath.Join("/sys/class/net", iface.Name, "brport/hairpin_mode") + + sysFile, err := os.OpenFile(sysPath, os.O_WRONLY, 0) + if err != nil { + return err + } + defer sysFile.Close() + + var writeVal []byte + if enabled { + writeVal = []byte("1") + } else { + writeVal = []byte("0") + } + if _, err := sysFile.Write(writeVal); err != nil { + return err + } + + return nil +} + +func ChangeName(iface *net.Interface, newName string) error { + if len(newName) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", newName) + } + + fd, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(fd) + + data := [IFNAMSIZ * 2]byte{} + // the "-1"s here are very important for ensuring we get 
proper null + // termination of our new C strings + copy(data[:IFNAMSIZ-1], iface.Name) + copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) + + if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { + return errno + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go new file mode 100644 index 00000000000..779e58a771f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go @@ -0,0 +1,5 @@ +package netlink + +func ifrDataByte(b byte) uint8 { + return uint8(b) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go new file mode 100644 index 00000000000..f151722a1b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go @@ -0,0 +1,7 @@ +// +build !arm + +package netlink + +func ifrDataByte(b byte) int8 { + return int8(b) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go new file mode 100644 index 00000000000..3f6511abfe8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -0,0 +1,408 @@ +package netlink + +import ( + "net" + "strings" + "syscall" + "testing" +) + +type testLink struct { + name string + linkType string +} + +func addLink(t *testing.T, name string, linkType string) { + if err := NetworkLinkAdd(name, linkType); err != nil { + t.Fatalf("Unable to create %s link: %s", name, err) + } +} + +func readLink(t *testing.T, name string) *net.Interface { + iface, err := net.InterfaceByName(name) + if err != nil { + t.Fatalf("Could not find %s interface: %s", name, err) + } + + return iface +} + +func deleteLink(t *testing.T, name string) { + if err := NetworkLinkDel(name); err != nil { + t.Fatalf("Unable to delete %s link: %s", name, err) + } +} + +func upLink(t *testing.T, name string) { + iface := readLink(t, name) + if err := NetworkLinkUp(iface); err != nil { + t.Fatalf("Could not bring UP %#v interface: %s", iface, err) + } +} + +func downLink(t *testing.T, name string) { + iface := readLink(t, name) + if err := NetworkLinkDown(iface); err != nil { + t.Fatalf("Could not bring DOWN %#v interface: %s", iface, err) + } +} + +func ipAssigned(iface *net.Interface, ip net.IP) bool { + addrs, _ := iface.Addrs() + + for _, addr := range addrs { + args := strings.SplitN(addr.String(), "/", 2) + if args[0] == ip.String() { + return true + } + } + + return false +} + +func TestNetworkLinkAddDel(t *testing.T) { + if testing.Short() { + return + } + + testLinks := []testLink{ + {"tstEth", "dummy"}, + {"tstBr", "bridge"}, + } + + for _, tl := range testLinks { + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + readLink(t, tl.name) + } +} + +func TestNetworkLinkUpDown(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{name: "tstEth", linkType: "dummy"} + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + upLink(t, tl.name) + ifcAfterUp := readLink(t, tl.name) + + if (ifcAfterUp.Flags & syscall.IFF_UP) != syscall.IFF_UP { + t.Fatalf("Could not bring UP %#v initerface", tl) + } + + downLink(t, tl.name) + ifcAfterDown := 
readLink(t, tl.name) + + if (ifcAfterDown.Flags & syscall.IFF_UP) == syscall.IFF_UP { + t.Fatalf("Could not bring DOWN %#v initerface", tl) + } +} + +func TestNetworkSetMacAddress(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{name: "tstEth", linkType: "dummy"} + macaddr := "22:ce:e0:99:63:6f" + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ifcBeforeSet := readLink(t, tl.name) + + if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil { + t.Fatalf("Could not set %s MAC address on %#v interface: %s", macaddr, tl, err) + } + + ifcAfterSet := readLink(t, tl.name) + + if ifcAfterSet.HardwareAddr.String() != macaddr { + t.Fatalf("Could not set %s MAC address on %#v interface", macaddr, tl) + } +} + +func TestNetworkSetMTU(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{name: "tstEth", linkType: "dummy"} + mtu := 1400 + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ifcBeforeSet := readLink(t, tl.name) + + if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil { + t.Fatalf("Could not set %d MTU on %#v interface: %s", mtu, tl, err) + } + + ifcAfterSet := readLink(t, tl.name) + + if ifcAfterSet.MTU != mtu { + t.Fatalf("Could not set %d MTU on %#v interface", mtu, tl) + } +} + +func TestNetworkSetMasterNoMaster(t *testing.T) { + if testing.Short() { + return + } + + master := testLink{"tstBr", "bridge"} + slave := testLink{"tstEth", "dummy"} + testLinks := []testLink{master, slave} + + for _, tl := range testLinks { + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + upLink(t, tl.name) + } + + masterIfc := readLink(t, master.name) + slaveIfc := readLink(t, slave.name) + if err := NetworkSetMaster(slaveIfc, masterIfc); err != nil { + t.Fatalf("Could not set %#v to be the master of %#v: %s", master, slave, err) + } + + // Trying to figure out a way to test which will not break on RHEL6. + // We could check for existence of /sys/class/net/tstEth/upper_tstBr + // which should point to the ../tstBr which is the UPPER device i.e. 
network bridge + + if err := NetworkSetNoMaster(slaveIfc); err != nil { + t.Fatalf("Could not UNset %#v master of %#v: %s", master, slave, err) + } +} + +func TestNetworkChangeName(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{"tstEth", "dummy"} + newName := "newTst" + + addLink(t, tl.name, tl.linkType) + + linkIfc := readLink(t, tl.name) + if err := NetworkChangeName(linkIfc, newName); err != nil { + deleteLink(t, tl.name) + t.Fatalf("Could not change %#v interface name to %s: %s", tl, newName, err) + } + + readLink(t, newName) + deleteLink(t, newName) +} + +func TestNetworkLinkAddVlan(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + id uint16 + }{ + name: "tstVlan", + id: 32, + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddVlan(masterLink.name, tl.name, tl.id); err != nil { + t.Fatalf("Unable to create %#v VLAN interface: %s", tl, err) + } + + readLink(t, tl.name) +} + +func TestNetworkLinkAddMacVlan(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + mode string + }{ + name: "tstVlan", + mode: "private", + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddMacVlan(masterLink.name, tl.name, tl.mode); err != nil { + t.Fatalf("Unable to create %#v MAC VLAN interface: %s", tl, err) + } + + readLink(t, tl.name) +} + +func TestNetworkLinkAddMacVtap(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + mode string + }{ + name: "tstVtap", + mode: "private", + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddMacVtap(masterLink.name, tl.name, tl.mode); err != nil { + t.Fatalf("Unable to create %#v MAC VTAP interface: %s", tl, err) + } + + readLink(t, tl.name) +} + +func TestAddDelNetworkIp(t *testing.T) { + if testing.Short() { + return + } + + ifaceName := "lo" + ip := net.ParseIP("127.0.1.1") + mask := net.IPv4Mask(255, 255, 255, 255) + ipNet := &net.IPNet{IP: ip, Mask: mask} + + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + t.Skip("No 'lo' interface; skipping tests") + } + + if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) + } + + if !ipAssigned(iface, ip) { + t.Fatalf("Could not locate address '%s' in lo address list.", ip.String()) + } + + if err := NetworkLinkDelIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not delete IP address %s from interface %#v: %s", ip.String(), iface, err) + } + + if ipAssigned(iface, ip) { + t.Fatalf("Located address '%s' in lo address list after removal.", ip.String()) + } +} + +func TestAddRouteSourceSelection(t *testing.T) { + tstIp := "127.1.1.1" + tl := testLink{name: "tstEth", linkType: "dummy"} + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ip := net.ParseIP(tstIp) + mask := net.IPv4Mask(255, 255, 255, 255) + ipNet := &net.IPNet{IP: ip, Mask: mask} + + iface, err := net.InterfaceByName(tl.name) + if err != nil { + t.Fatalf("Lost created link %#v", tl) + } + + if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) + } + + upLink(t, tl.name) + defer 
downLink(t, tl.name) + + if err := AddRoute("127.0.0.0/8", tstIp, "", tl.name); err != nil { + t.Fatalf("Failed to add route with source address") + } +} + +func TestCreateVethPair(t *testing.T) { + if testing.Short() { + return + } + + var ( + name1 = "veth1" + name2 = "veth2" + ) + + if err := NetworkCreateVethPair(name1, name2, 0); err != nil { + t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err) + } + defer NetworkLinkDel(name1) + + readLink(t, name1) + readLink(t, name2) +} + +// +// netlink package tests which do not use RTNETLINK +// +func TestCreateBridgeWithMac(t *testing.T) { + if testing.Short() { + return + } + + name := "testbridge" + + if err := CreateBridge(name, true); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err != nil { + t.Fatal(err) + } + + // cleanup and tests + + if err := DeleteBridge(name); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err == nil { + t.Fatalf("expected error getting interface because %s bridge was deleted", name) + } +} + +func TestSetMacAddress(t *testing.T) { + if testing.Short() { + return + } + + name := "testmac" + mac := randMacAddr() + + if err := NetworkLinkAdd(name, "bridge"); err != nil { + t.Fatal(err) + } + defer NetworkLinkDel(name) + + if err := SetMacAddress(name, mac); err != nil { + t.Fatal(err) + } + + iface, err := net.InterfaceByName(name) + if err != nil { + t.Fatal(err) + } + + if iface.HardwareAddr.String() != mac { + t.Fatalf("mac address %q does not match %q", iface.HardwareAddr, mac) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go new file mode 100644 index 00000000000..4b11bf8ba56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go @@ -0,0 +1,88 @@ +// +build !linux + +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +func NetworkGetRoutes() ([]Route, error) { + return nil, ErrNotImplemented +} + +func NetworkLinkAdd(name string, linkType string) error { + return ErrNotImplemented +} + +func NetworkLinkDel(name string) error { + return ErrNotImplemented +} + +func NetworkLinkUp(iface *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return ErrNotImplemented +} + +func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return ErrNotImplemented +} + +func AddRoute(destination, source, gateway, device string) error { + return ErrNotImplemented +} + +func AddDefaultGw(ip, device string) error { + return ErrNotImplemented +} + +func NetworkSetMTU(iface *net.Interface, mtu int) error { + return ErrNotImplemented +} + +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + return ErrNotImplemented +} + +func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { + return ErrNotImplemented +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + return ErrNotImplemented +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + return ErrNotImplemented +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + return ErrNotImplemented +} + +func NetworkSetMaster(iface, master *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkDown(iface *net.Interface) error { + return ErrNotImplemented +} 
+ +func CreateBridge(name string, setMacAddr bool) error { + return ErrNotImplemented +} + +func DeleteBridge(name string) error { + return ErrNotImplemented +} + +func AddToBridge(iface, master *net.Interface) error { + return ErrNotImplemented +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go new file mode 100644 index 00000000000..1667b4d82a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/loopback.go @@ -0,0 +1,23 @@ +// +build linux + +package network + +import ( + "fmt" +) + +// Loopback is a network strategy that provides a basic loopback device +type Loopback struct { +} + +func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error { + return nil +} + +func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error { + // Do not set the MTU on the loopback interface - use the default. + if err := InterfaceUp("lo"); err != nil { + return fmt.Errorf("lo up %s", err) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go new file mode 100644 index 00000000000..40b25b135b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/network.go @@ -0,0 +1,117 @@ +// +build linux + +package network + +import ( + "net" + + "github.com/docker/libcontainer/netlink" +) + +func InterfaceUp(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkUp(iface) +} + +func InterfaceDown(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkDown(iface) +} + +func ChangeInterfaceName(old, newName string) error { + iface, err := net.InterfaceByName(old) + if err != nil { + return err + } + return netlink.NetworkChangeName(iface, newName) +} + +func CreateVethPair(name1, name2 string, txQueueLen int) error { + return netlink.NetworkCreateVethPair(name1, name2, txQueueLen) +} + +func SetInterfaceInNamespacePid(name string, nsPid int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetNsPid(iface, nsPid) +} + +func SetInterfaceInNamespaceFd(name string, fd uintptr) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetNsFd(iface, int(fd)) +} + +func SetInterfaceMaster(name, master string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + masterIface, err := net.InterfaceByName(master) + if err != nil { + return err + } + return netlink.AddToBridge(iface, masterIface) +} + +func SetDefaultGateway(ip, ifaceName string) error { + return netlink.AddDefaultGw(ip, ifaceName) +} + +func SetInterfaceMac(name string, macaddr string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetMacAddress(iface, macaddr) +} + +func SetInterfaceIp(name string, rawIp string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(rawIp) + if err != nil { + return err + } + return netlink.NetworkLinkAddIp(iface, ip, ipNet) +} + +func DeleteInterfaceIp(name string, rawIp string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + ip, ipNet, err := 
net.ParseCIDR(rawIp) + if err != nil { + return err + } + return netlink.NetworkLinkDelIp(iface, ip, ipNet) +} + +func SetMtu(name string, mtu int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetMTU(iface, mtu) +} + +func SetHairpinMode(name string, enabled bool) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.SetHairpinMode(iface, enabled) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go new file mode 100644 index 00000000000..e2156c74da4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/stats.go @@ -0,0 +1,74 @@ +package network + +import ( + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. +func GetStats(networkState *NetworkState) (*NetworkStats, error) { + // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer. + if networkState.VethHost == "" { + return &NetworkStats{}, nil + } + + out := &NetworkStats{} + + type netStatsPair struct { + // Where to write the output. + Out *uint64 + + // The network stats file to read. + File string + } + + // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
+ netStats := []netStatsPair{ + {Out: &out.RxBytes, File: "tx_bytes"}, + {Out: &out.RxPackets, File: "tx_packets"}, + {Out: &out.RxErrors, File: "tx_errors"}, + {Out: &out.RxDropped, File: "tx_dropped"}, + + {Out: &out.TxBytes, File: "rx_bytes"}, + {Out: &out.TxPackets, File: "rx_packets"}, + {Out: &out.TxErrors, File: "rx_errors"}, + {Out: &out.TxDropped, File: "rx_dropped"}, + } + for _, netStat := range netStats { + data, err := readSysfsNetworkStats(networkState.VethHost, netStat.File) + if err != nil { + return nil, err + } + *(netStat.Out) = data + } + + return out, nil +} + +// Reads the specified statistics available under /sys/class/net//statistics +func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { + fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + return 0, err + } + value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return 0, err + } + + return value, err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go new file mode 100644 index 00000000000..019fe62f419 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/strategy.go @@ -0,0 +1,34 @@ +// +build linux + +package network + +import ( + "errors" +) + +var ( + ErrNotValidStrategyType = errors.New("not a valid network strategy type") +) + +var strategies = map[string]NetworkStrategy{ + "veth": &Veth{}, + "loopback": &Loopback{}, +} + +// NetworkStrategy represents a specific network configuration for +// a container's networking stack +type NetworkStrategy interface { + Create(*Network, int, *NetworkState) error + Initialize(*Network, *NetworkState) error +} + +// GetStrategy returns the specific network strategy for the +// provided type. If no strategy is registered for the type an +// ErrNotValidStrategyType is returned. +func GetStrategy(tpe string) (NetworkStrategy, error) { + s, exists := strategies[tpe] + if !exists { + return nil, ErrNotValidStrategyType + } + return s, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go new file mode 100644 index 00000000000..dcf00420f3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/types.go @@ -0,0 +1,50 @@ +package network + +// Network defines configuration for a container's networking stack +// +// The network configuration can be omited from a container causing the +// container to be setup with the host's networking stack +type Network struct { + // Type sets the networks type, commonly veth and loopback + Type string `json:"type,omitempty"` + + // The bridge to use. + Bridge string `json:"bridge,omitempty"` + + // Prefix for the veth interfaces. 
+ VethPrefix string `json:"veth_prefix,omitempty"` + + // MacAddress contains the MAC address to set on the network interface + MacAddress string `json:"mac_address,omitempty"` + + // Address contains the IPv4 and mask to set on the network interface + Address string `json:"address,omitempty"` + + // IPv6Address contains the IPv6 and mask to set on the network interface + IPv6Address string `json:"ipv6_address,omitempty"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway,omitempty"` + + // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface + IPv6Gateway string `json:"ipv6_gateway,omitempty"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + Mtu int `json:"mtu,omitempty"` + + // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + TxQueueLen int `json:"txqueuelen,omitempty"` +} + +// Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers +// Do not depend on it outside of libcontainer. +type NetworkState struct { + // The name of the veth interface on the Host. + VethHost string `json:"veth_host,omitempty"` + // The name of the veth interface created inside the container for the child. + VethChild string `json:"veth_child,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go new file mode 100644 index 00000000000..3d7dc8729e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth.go @@ -0,0 +1,122 @@ +// +build linux + +package network + +import ( + "fmt" + + "github.com/docker/libcontainer/netlink" + "github.com/docker/libcontainer/utils" +) + +// Veth is a network strategy that uses a bridge and creates +// a veth pair, one that stays outside on the host and the other +// is placed inside the container's namespace +type Veth struct { +} + +const defaultDevice = "eth0" + +func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { + var ( + bridge = n.Bridge + prefix = n.VethPrefix + txQueueLen = n.TxQueueLen + ) + if bridge == "" { + return fmt.Errorf("bridge is not specified") + } + if prefix == "" { + return fmt.Errorf("veth prefix is not specified") + } + name1, name2, err := createVethPair(prefix, txQueueLen) + if err != nil { + return err + } + if err := SetInterfaceMaster(name1, bridge); err != nil { + return err + } + if err := SetMtu(name1, n.Mtu); err != nil { + return err + } + if err := InterfaceUp(name1); err != nil { + return err + } + if err := SetInterfaceInNamespacePid(name2, nspid); err != nil { + return err + } + networkState.VethHost = name1 + networkState.VethChild = name2 + + return nil +} + +func (v *Veth) Initialize(config *Network, networkState *NetworkState) error { + var vethChild = networkState.VethChild + if vethChild == "" { + return fmt.Errorf("vethChild is not specified") + } + if err := InterfaceDown(vethChild); err != nil { + return fmt.Errorf("interface down %s %s", vethChild, err) + } + if err := ChangeInterfaceName(vethChild, defaultDevice); err != nil { + 
return fmt.Errorf("change %s to %s %s", vethChild, defaultDevice, err) + } + if config.MacAddress != "" { + if err := SetInterfaceMac(defaultDevice, config.MacAddress); err != nil { + return fmt.Errorf("set %s mac %s", defaultDevice, err) + } + } + if err := SetInterfaceIp(defaultDevice, config.Address); err != nil { + return fmt.Errorf("set %s ip %s", defaultDevice, err) + } + if config.IPv6Address != "" { + if err := SetInterfaceIp(defaultDevice, config.IPv6Address); err != nil { + return fmt.Errorf("set %s ipv6 %s", defaultDevice, err) + } + } + + if err := SetMtu(defaultDevice, config.Mtu); err != nil { + return fmt.Errorf("set %s mtu to %d %s", defaultDevice, config.Mtu, err) + } + if err := InterfaceUp(defaultDevice); err != nil { + return fmt.Errorf("%s up %s", defaultDevice, err) + } + if config.Gateway != "" { + if err := SetDefaultGateway(config.Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway to %s on device %s failed with %s", config.Gateway, defaultDevice, err) + } + } + if config.IPv6Gateway != "" { + if err := SetDefaultGateway(config.IPv6Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway for ipv6 to %s on device %s failed with %s", config.IPv6Gateway, defaultDevice, err) + } + } + return nil +} + +// createVethPair will automatically generage two random names for +// the veth pair and ensure that they have been created +func createVethPair(prefix string, txQueueLen int) (name1 string, name2 string, err error) { + for i := 0; i < 10; i++ { + if name1, err = utils.GenerateRandomName(prefix, 7); err != nil { + return + } + + if name2, err = utils.GenerateRandomName(prefix, 7); err != nil { + return + } + + if err = CreateVethPair(name1, name2, txQueueLen); err != nil { + if err == netlink.ErrInterfaceExists { + continue + } + + return + } + + break + } + + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth_test.go new file mode 100644 index 00000000000..b92b284eb09 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/network/veth_test.go @@ -0,0 +1,53 @@ +// +build linux + +package network + +import ( + "testing" + + "github.com/docker/libcontainer/netlink" +) + +func TestGenerateVethNames(t *testing.T) { + if testing.Short() { + return + } + + prefix := "veth" + + name1, name2, err := createVethPair(prefix, 0) + if err != nil { + t.Fatal(err) + } + + if name1 == "" { + t.Fatal("name1 should not be empty") + } + + if name2 == "" { + t.Fatal("name2 should not be empty") + } +} + +func TestCreateDuplicateVethPair(t *testing.T) { + if testing.Short() { + return + } + + prefix := "veth" + + name1, name2, err := createVethPair(prefix, 0) + if err != nil { + t.Fatal(err) + } + + // retry to create the name interfaces and make sure that we get the correct error + err = CreateVethPair(name1, name2, 0) + if err == nil { + t.Fatal("expected error to not be nil with duplicate interface") + } + + if err != netlink.ErrInterfaceExists { + t.Fatalf("expected error to be ErrInterfaceExists but received %q", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux.go new file mode 100644 index 00000000000..a4923273a30 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux.go @@ -0,0 +1,62 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "io/ioutil" + "os" + 
"path/filepath" + "syscall" +) + +const oomCgroupName = "memory" + +// NotifyOnOOM returns channel on which you can expect event about OOM, +// if process died without OOM this channel will be closed. +// s is current *libcontainer.State for container. +func NotifyOnOOM(s *State) (<-chan struct{}, error) { + dir := s.CgroupPaths[oomCgroupName] + if dir == "" { + return nil, fmt.Errorf("There is no path for %q in state", oomCgroupName) + } + oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) + if err != nil { + return nil, err + } + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + eventControlPath := filepath.Join(dir, "cgroup.event_control") + data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) + if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { + eventfd.Close() + oomControl.Close() + return nil, err + } + ch := make(chan struct{}) + go func() { + defer func() { + close(ch) + eventfd.Close() + oomControl.Close() + }() + buf := make([]byte, 8) + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. + if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + ch <- struct{}{} + } + }() + return ch, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux_test.go new file mode 100644 index 00000000000..5d1d54576b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/notify_linux_test.go @@ -0,0 +1,98 @@ +// +build linux + +package libcontainer + +import ( + "encoding/binary" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + "time" +) + +func TestNotifyOnOOM(t *testing.T) { + memoryPath, err := ioutil.TempDir("", "testnotifyoom-") + if err != nil { + t.Fatal(err) + } + oomPath := filepath.Join(memoryPath, "memory.oom_control") + eventPath := filepath.Join(memoryPath, "cgroup.event_control") + if err := ioutil.WriteFile(oomPath, []byte{}, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(eventPath, []byte{}, 0700); err != nil { + t.Fatal(err) + } + var eventFd, oomControlFd int + st := &State{ + CgroupPaths: map[string]string{ + "memory": memoryPath, + }, + } + ooms, err := NotifyOnOOM(st) + if err != nil { + t.Fatal("expected no error, got:", err) + } + + data, err := ioutil.ReadFile(eventPath) + if err != nil { + t.Fatal("couldn't read event control file:", err) + } + + if _, err := fmt.Sscanf(string(data), "%d %d", &eventFd, &oomControlFd); err != nil { + t.Fatalf("invalid control data %q: %s", data, err) + } + + // re-open the eventfd + efd, err := syscall.Dup(eventFd) + if err != nil { + t.Fatal("unable to reopen eventfd:", err) + } + defer syscall.Close(efd) + + if err != nil { + t.Fatal("unable to dup event fd:", err) + } + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, 1) + + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + select { + case <-ooms: + case <-time.After(100 * time.Millisecond): + t.Fatal("no notification on oom channel after 100ms") + } + + // simulate what happens when a cgroup is destroyed by cleaning up and then + // writing to the eventfd. 
+ if err := os.RemoveAll(memoryPath); err != nil { + t.Fatal(err) + } + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + // give things a moment to shut down + select { + case _, ok := <-ooms: + if ok { + t.Fatal("expected no oom to be triggered") + } + case <-time.After(100 * time.Millisecond): + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected oom control to be closed") + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected event fd to be closed") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go new file mode 100644 index 00000000000..74c7b3c09f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/config.go @@ -0,0 +1,29 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" +) + +var configCommand = cli.Command{ + Name: "config", + Usage: "display the container configuration", + Action: configAction, +} + +func configAction(context *cli.Context) { + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + data, err := json.MarshalIndent(container, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go new file mode 100644 index 00000000000..6fc553b8f93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/exec.go @@ -0,0 +1,208 @@ +package main + +import ( + "fmt" + "io" + "log" + "os" + "os/exec" + "os/signal" + "syscall" + "text/tabwriter" + + "github.com/codegangsta/cli" + "github.com/docker/docker/pkg/term" + "github.com/docker/libcontainer" + consolepkg "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/namespaces" +) + +var execCommand = cli.Command{ + Name: "exec", + Usage: "execute a new command inside a container", + Action: execAction, + Flags: []cli.Flag{ + cli.BoolFlag{Name: "list", Usage: "list all registered exec functions"}, + cli.StringFlag{Name: "func", Value: "exec", Usage: "function name to exec inside a container"}, + }, +} + +func execAction(context *cli.Context) { + if context.Bool("list") { + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + fmt.Fprint(w, "NAME\tUSAGE\n") + + for k, f := range argvs { + fmt.Fprintf(w, "%s\t%s\n", k, f.Usage) + } + + w.Flush() + + return + } + + var exitCode int + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil && !os.IsNotExist(err) { + log.Fatalf("unable to read state.json: %s", err) + } + + if state != nil { + exitCode, err = startInExistingContainer(container, state, context.String("func"), context) + } else { + exitCode, err = startContainer(container, dataPath, []string(context.Args())) + } + + if err != nil { + log.Fatalf("failed to exec: %s", err) + } + + os.Exit(exitCode) +} + +// the process for execing a new process inside an existing container is that we have to exec ourself +// with the nsenter argument so that the C code can setns an the namespaces that we require. 
Then that +// code path will drop us into the path that we can do the final setup of the namespace and exec the users +// application. +func startInExistingContainer(config *libcontainer.Config, state *libcontainer.State, action string, context *cli.Context) (int, error) { + var ( + master *os.File + console string + err error + + sigc = make(chan os.Signal, 10) + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + ) + signal.Notify(sigc) + + if config.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + return -1, err + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + return -1, err + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func(cmd *exec.Cmd) { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.ExecIn(config, state, context.Args(), os.Args[0], action, stdin, stdout, stderr, console, startCallback) +} + +// startContainer starts the container. Returns the exit status or -1 and an +// error. +// +// Signals sent to the current process will be forwarded to container. +func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) { + var ( + cmd *exec.Cmd + sigc = make(chan os.Signal, 10) + ) + + signal.Notify(sigc) + + createCommand := func(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + cmd = namespaces.DefaultCreateCommand(container, console, dataPath, init, pipe, args) + if logPath != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath)) + } + return cmd + } + + var ( + master *os.File + console string + err error + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + ) + + if container.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + return -1, err + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + return -1, err + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func() { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.Exec(container, stdin, stdout, stderr, console, dataPath, args, createCommand, startCallback) +} + +func resizeTty(master *os.File) { + if master == nil { + return + } + + ws, err := term.GetWinsize(os.Stdin.Fd()) + if err != nil { + return + } + + if err := term.SetWinsize(master.Fd(), ws); err != nil { + return + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go new file mode 100644 index 00000000000..6df9b1d894d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/init.go @@ -0,0 +1,47 @@ +package main + +import ( + "log" + "os" + "runtime" + "strconv" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/namespaces" +) + +var ( + dataPath = os.Getenv("data_path") + console = os.Getenv("console") + rawPipeFd = os.Getenv("pipe") + + initCommand = cli.Command{ + Name: "init", + Usage: "runs the init 
process inside the namespace", + Action: initAction, + } +) + +func initAction(context *cli.Context) { + runtime.LockOSThread() + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + pipeFd, err := strconv.Atoi(rawPipeFd) + if err != nil { + log.Fatal(err) + } + + pipe := os.NewFile(uintptr(pipeFd), "pipe") + if err := namespaces.Init(container, rootfs, console, pipe, []string(context.Args())); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/main.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/main.go new file mode 100644 index 00000000000..d65c0140e8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "log" + "os" + "strings" + + "github.com/codegangsta/cli" +) + +var ( + logPath = os.Getenv("log") + argvs = make(map[string]*rFunc) +) + +func init() { + argvs["exec"] = &rFunc{ + Usage: "execute a process inside an existing container", + Action: nsenterExec, + } + + argvs["mknod"] = &rFunc{ + Usage: "mknod a device inside an existing container", + Action: nsenterMknod, + } + + argvs["ip"] = &rFunc{ + Usage: "display the container's network interfaces", + Action: nsenterIp, + } +} + +func main() { + // we need to check our argv 0 for any registred functions to run instead of the + // normal cli code path + f, exists := argvs[strings.TrimPrefix(os.Args[0], "nsenter-")] + if exists { + runFunc(f) + + return + } + + app := cli.NewApp() + + app.Name = "nsinit" + app.Version = "0.1" + app.Author = "libcontainer maintainers" + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "nspid"}, + cli.StringFlag{Name: "console"}, + } + + app.Before = preload + + app.Commands = []cli.Command{ + execCommand, + initCommand, + statsCommand, + configCommand, + pauseCommand, + unpauseCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go new file mode 100644 index 00000000000..8dc149f4fbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/nsenter.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "log" + "net" + "os" + "strconv" + "strings" + "text/tabwriter" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/mount/nodes" + "github.com/docker/libcontainer/namespaces" + _ "github.com/docker/libcontainer/namespaces/nsenter" +) + +// nsenterExec exec's a process inside an existing container +func nsenterExec(config *libcontainer.Config, args []string) { + if err := namespaces.FinalizeSetns(config, args); err != nil { + log.Fatalf("failed to nsenter: %s", err) + } +} + +// nsenterMknod runs mknod inside an existing container +// +// mknod +func nsenterMknod(config *libcontainer.Config, args []string) { + if len(args) != 4 { + log.Fatalf("expected mknod to have 4 arguments not %d", len(args)) + } + + t := rune(args[1][0]) + + major, err := strconv.Atoi(args[2]) + if err != nil { + log.Fatal(err) + } + + minor, err := strconv.Atoi(args[3]) + if err != nil { + log.Fatal(err) + } + + n := &devices.Device{ + Path: args[0], + Type: t, + MajorNumber: int64(major), + MinorNumber: int64(minor), + } + + if err := nodes.CreateDeviceNode("/", n); err != nil { + 
log.Fatal(err) + } +} + +// nsenterIp displays the network interfaces inside a container's net namespace +func nsenterIp(config *libcontainer.Config, args []string) { + interfaces, err := net.Interfaces() + if err != nil { + log.Fatal(err) + } + + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + fmt.Fprint(w, "NAME\tMTU\tMAC\tFLAG\tADDRS\n") + + for _, iface := range interfaces { + addrs, err := iface.Addrs() + if err != nil { + log.Fatal(err) + } + + o := []string{} + + for _, a := range addrs { + o = append(o, a.String()) + } + + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", iface.Name, iface.MTU, iface.HardwareAddr, iface.Flags, strings.Join(o, ",")) + } + + w.Flush() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go new file mode 100644 index 00000000000..ada24250c15 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/pause.go @@ -0,0 +1,49 @@ +package main + +import ( + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" +) + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "pause the container's processes", + Action: pauseAction, +} + +var unpauseCommand = cli.Command{ + Name: "unpause", + Usage: "unpause the container's processes", + Action: unpauseAction, +} + +func pauseAction(context *cli.Context) { + if err := toggle(cgroups.Frozen); err != nil { + log.Fatal(err) + } +} + +func unpauseAction(context *cli.Context) { + if err := toggle(cgroups.Thawed); err != nil { + log.Fatal(err) + } +} + +func toggle(state cgroups.FreezerState) error { + container, err := loadConfig() + if err != nil { + return err + } + + if systemd.UseSystemd() { + err = systemd.Freeze(container.Cgroups, state) + } else { + err = fs.Freeze(container.Cgroups, state) + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go new file mode 100644 index 00000000000..612b4a4baeb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/stats.go @@ -0,0 +1,39 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer" +) + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "display statistics for the container", + Action: statsAction, +} + +func statsAction(context *cli.Context) { + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil { + log.Fatal(err) + } + + stats, err := libcontainer.GetStats(container, state) + if err != nil { + log.Fatal(err) + } + data, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go new file mode 100644 index 00000000000..6a8aafbf170 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/nsinit/utils.go @@ -0,0 +1,90 @@ +package main + +import ( + "encoding/json" + "log" + "os" + "path/filepath" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer" +) + +// rFunc is a function registration for calling after an execin +type rFunc struct { + Usage string + Action 
func(*libcontainer.Config, []string) +} + +func loadConfig() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(dataPath, "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + + return nil +} + +func findUserArgs() []string { + i := 0 + for _, a := range os.Args { + i++ + + if a == "--" { + break + } + } + + return os.Args[i:] +} + +// loadConfigFromFd loads a container's config from the sync pipe that is provided by +// fd 3 when running a process +func loadConfigFromFd() (*libcontainer.Config, error) { + pipe := os.NewFile(3, "pipe") + defer pipe.Close() + + var config *libcontainer.Config + if err := json.NewDecoder(pipe).Decode(&config); err != nil { + return nil, err + } + return config, nil +} + +func preload(context *cli.Context) error { + if logPath != "" { + if err := openLog(logPath); err != nil { + return err + } + } + + return nil +} + +func runFunc(f *rFunc) { + userArgs := findUserArgs() + + config, err := loadConfigFromFd() + if err != nil { + log.Fatalf("unable to receive config from sync pipe: %s", err) + } + + f.Action(config, userArgs) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/process.go b/Godeps/_workspace/src/github.com/docker/libcontainer/process.go new file mode 100644 index 00000000000..489666a5878 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/process.go @@ -0,0 +1,27 @@ +package libcontainer + +import "io" + +// Configuration for a process to be run inside a container. +type ProcessConfig struct { + // The command to be run followed by any arguments. + Args []string + + // Map of environment variables to their values. + Env []string + + // Stdin is a pointer to a reader which provides the standard input stream. + // Stdout is a pointer to a writer which receives the standard output stream. + // Stderr is a pointer to a writer which receives the standard error stream. + // + // If a reader or writer is nil, the input stream is assumed to be empty and the output is + // discarded. + // + // The readers and writers, if supplied, are closed when the process terminates. Their Close + // methods should be idempotent. + // + // Stdout and Stderr may refer to the same writer in which case the output is interspersed. + Stdin io.ReadCloser + Stdout io.WriteCloser + Stderr io.WriteCloser +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md new file mode 100644 index 00000000000..4ccc6cde944 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/README.md @@ -0,0 +1,5 @@ +These configuration files can be used with `nsinit` to quickly develop, test, +and experiment with features of libcontainer. + +When consuming these configuration files, copy them into your rootfs and rename +the file to `container.json` for use with `nsinit`. 
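To go with the sample_configs README above, here is a minimal standalone sketch, not part of nsinit (the container.json path is illustrative, and it assumes libcontainer.Config exposes a Hostname field matching the "hostname" key used in these samples), that decodes a sample config the same way nsinit's loadConfig does:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/docker/libcontainer"
)

func main() {
	// Copy one of the sample configs into your rootfs and rename it to
	// container.json, as the README describes, then point this at it.
	f, err := os.Open("container.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Same decoding step as nsinit's loadConfig above.
	var config *libcontainer.Config
	if err := json.NewDecoder(f).Decode(&config); err != nil {
		log.Fatalf("invalid container config: %s", err)
	}

	fmt.Printf("loaded config for hostname %q\n", config.Hostname)
}
```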
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json new file mode 100644 index 00000000000..96f73cb7949 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/apparmor.json @@ -0,0 +1,196 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "apparmor_profile": "docker-default", + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": [ + {"type":"NEWIPC"}, + {"type": "NEWNET"}, + {"type": "NEWNS"}, + {"type": "NEWPID"}, + {"type": "NEWUTS"} + ], + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + 
"mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json new file mode 100644 index 00000000000..e5c03a7ef42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json @@ -0,0 +1,202 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": [ + {"type": "NEWIPC"}, + {"type": "NEWNET"}, + {"type": "NEWNS"}, + {"type": "NEWPID"}, + {"type": "NEWUTS"} + ], + 
"networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "gateway": "172.17.42.1", + "mtu": 1500, + "type": "veth" + } + ], + "tty": true +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json new file mode 100644 index 00000000000..01de467468f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/minimal.json @@ -0,0 +1,201 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "mounts": [ + { + "type": "tmpfs", + "destination": "/tmp" + } + ] + }, + "environment": [ + "HOME=/", + 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": [ + {"type": "NEWIPC"}, + {"type": "NEWNET"}, + {"type": "NEWNS"}, + {"type": "NEWPID"}, + {"type": "NEWUTS"} + ], + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json new file mode 100644 index 00000000000..9c62045a4bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json @@ -0,0 +1,209 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 
1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": [ + {"type": "NEWIPC"}, + {"type": "NEWNET"}, + {"type": "NEWNS"}, + {"type": "NEWPID"}, + {"type": "NEWUTS"} + ], + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "mtu": 1500, + "type": "veth" + } + ], + "routes": [ + { + "destination": "0.0.0.0/0", + "source": "172.17.0.101", + "gateway": "172.17.42.1", + "interface_name": "eth0" + } + ], + "tty": true +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json new file mode 100644 index 00000000000..15556488a27 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/sample_configs/selinux.json @@ -0,0 +1,197 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "mount_config": { + "mount_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 
438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": [ + {"type": "NEWIPC"}, + {"type": "NEWNET"}, + {"type": "NEWNS"}, + {"type": "NEWPID"}, + {"type": "NEWUTS"} + ], + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go new file mode 100644 index 00000000000..7aef5fa67f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/capabilities.go @@ -0,0 +1,56 @@ +package capabilities + +import ( + "os" + + "github.com/syndtr/gocapability/capability" +) + +const allCapabilityTypes = capability.CAPS | capability.BOUNDS + +// DropBoundingSet drops the capability bounding set to those specified in the +// container configuration. +func DropBoundingSet(capabilities []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capabilities) + c.Clear(capability.BOUNDS) + c.Set(capability.BOUNDS, keep...) + + if err := c.Apply(capability.BOUNDS); err != nil { + return err + } + + return nil +} + +// DropCapabilities drops all capabilities for the current process except those specified in the container configuration. +func DropCapabilities(capList []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capList) + c.Clear(allCapabilityTypes) + c.Set(allCapabilityTypes, keep...) + + if err := c.Apply(allCapabilityTypes); err != nil { + return err + } + return nil +} + +// getEnabledCapabilities returns the capabilities that should not be dropped by the container. 
+func getEnabledCapabilities(capList []string) []capability.Cap { + keep := []capability.Cap{} + for _, capability := range capList { + if c := GetCapability(capability); c != nil { + keep = append(keep, c.Value) + } + } + return keep +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go new file mode 100644 index 00000000000..a960b804c68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types.go @@ -0,0 +1,88 @@ +package capabilities + +import "github.com/syndtr/gocapability/capability" + +type ( + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + Capabilities []*CapabilityMapping +) + +func (c *CapabilityMapping) String() string { + return c.Key +} + +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// Contains returns true if the specified Capability is +// in the slice +func (c Capabilities) contains(capp string) bool { + return c.get(capp) != nil +} + +func (c Capabilities) get(capp string) *CapabilityMapping { + for _, cap := range c { + if cap.Key == capp { + return cap + } + } + return nil +} + +var capabilityList = Capabilities{ + {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, + {Key: "MKNOD", Value: capability.CAP_MKNOD}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, + {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, + {Key: "CHOWN", Value: capability.CAP_CHOWN}, + {Key: "NET_RAW", Value: capability.CAP_NET_RAW}, + {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE}, + {Key: "FOWNER", Value: capability.CAP_FOWNER}, + {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH}, + {Key: "FSETID", Value: capability.CAP_FSETID}, + {Key: "KILL", Value: capability.CAP_KILL}, + {Key: "SETGID", Value: capability.CAP_SETGID}, + {Key: "SETUID", Value: capability.CAP_SETUID}, + {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE}, + {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE}, + {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST}, + {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK}, + {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER}, + {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT}, + {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE}, + {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT}, + {Key: "LEASE", Value: capability.CAP_LEASE}, + {Key: "SETFCAP", Value: capability.CAP_SETFCAP}, + {Key: 
"WAKE_ALARM", Value: capability.CAP_WAKE_ALARM}, + {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND}, +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go new file mode 100644 index 00000000000..06e8a2b01c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/capabilities/types_test.go @@ -0,0 +1,19 @@ +package capabilities + +import ( + "testing" +) + +func TestCapabilitiesContains(t *testing.T) { + caps := Capabilities{ + GetCapability("MKNOD"), + GetCapability("SETPCAP"), + } + + if caps.contains("SYS_ADMIN") { + t.Fatal("capabilities should not contain SYS_ADMIN") + } + if !caps.contains("MKNOD") { + t.Fatal("capabilities should contain MKNOD but does not") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go new file mode 100644 index 00000000000..dd765b1f1b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/restrict.go @@ -0,0 +1,53 @@ +// +build linux + +package restrict + +import ( + "fmt" + "os" + "syscall" + "time" +) + +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +func mountReadonly(path string) error { + for i := 0; i < 5; i++ { + if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { + switch err { + case syscall.EINVAL: + // Probably not a mountpoint, use bind-mount + if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { + return err + } + + return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") + case syscall.EBUSY: + time.Sleep(100 * time.Millisecond) + continue + default: + return err + } + } + + return nil + } + + return fmt.Errorf("unable to mount %s as readonly max retries reached", path) +} + +// This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). +// However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). 
+func Restrict(mounts ...string) error { + for _, dest := range mounts { + if err := mountReadonly(dest); err != nil { + return fmt.Errorf("unable to remount %s readonly: %s", dest, err) + } + } + + if err := syscall.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go new file mode 100644 index 00000000000..464e8d498d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/security/restrict/unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package restrict + +import "fmt" + +func Restrict() error { + return fmt.Errorf("not supported") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go new file mode 100644 index 00000000000..e5bd8209809 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux.go @@ -0,0 +1,461 @@ +// +build linux + +package selinux + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/pkg/mount" + "github.com/docker/libcontainer/system" +) + +const ( + Enforcing = 1 + Permissive = 0 + Disabled = -1 + selinuxDir = "/etc/selinux/" + selinuxConfig = selinuxDir + "config" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + selinuxPath = "/sys/fs/selinux" + xattrNameSelinux = "security.selinux" + stRdOnly = 0x01 +) + +var ( + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`) + mcsList = make(map[string]bool) + selinuxfs = "unknown" + selinuxEnabled = false + selinuxEnabledChecked = false +) + +type SELinuxContext map[string]string + +// SetDisabled disables selinux support for the package +func SetDisabled() { + selinuxEnabled, selinuxEnabledChecked = false, true +} + +func getSelinuxMountPoint() string { + if selinuxfs != "unknown" { + return selinuxfs + } + selinuxfs = "" + + mounts, err := mount.GetMounts() + if err != nil { + return selinuxfs + } + for _, mount := range mounts { + if mount.Fstype == "selinuxfs" { + selinuxfs = mount.Mountpoint + break + } + } + if selinuxfs != "" { + var buf syscall.Statfs_t + syscall.Statfs(selinuxfs, &buf) + if (buf.Flags & stRdOnly) == 1 { + selinuxfs = "" + } + } + return selinuxfs +} + +func SelinuxEnabled() bool { + if selinuxEnabledChecked { + return selinuxEnabled + } + selinuxEnabledChecked = true + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := Getcon(); con != "kernel" { + selinuxEnabled = true + } + } + return selinuxEnabled +} + +func readConfig(target string) (value string) { + var ( + val, key string + bufin *bufio.Reader + ) + + in, err := os.Open(selinuxConfig) + if err != nil { + return "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err != io.EOF { + return "" + } + done = true + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = 
strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == target { + return strings.Trim(val, "\"") + } + } + } + return "" +} + +func getSELinuxPolicyRoot() string { + return selinuxDir + readConfig(selinuxTypeTag) +} + +func readCon(name string) (string, error) { + var val string + + in, err := os.Open(name) + if err != nil { + return "", err + } + defer in.Close() + + _, err = fmt.Fscanf(in, "%s", &val) + return val, err +} + +func Setfilecon(path string, scon string) error { + return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0) +} + +// Return the SELinux label for this path +func Getfilecon(path string) (string, error) { + con, err := system.Lgetxattr(path, xattrNameSelinux) + return string(con), err +} + +func Setfscreatecon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon) +} + +func Getfscreatecon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid())) +} + +// Return the SELinux label of the current process thread. +func Getcon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid())) +} + +func Getpidcon(pid int) (string, error) { + return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) +} + +func Getexeccon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid())) +} + +func writeCon(name string, val string) error { + out, err := os.OpenFile(name, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + return err +} + +func Setexeccon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon) +} + +func (c SELinuxContext) Get() string { + return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) +} + +func NewContext(scon string) SELinuxContext { + c := make(SELinuxContext) + + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + c["level"] = con[3] + } + return c +} + +func ReserveLabel(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsAdd(con[3]) + } +} + +func SelinuxGetEnforce() int { + var enforce int + + enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath)) + if err != nil { + return -1 + } + + enforce, err = strconv.Atoi(string(enforceS)) + if err != nil { + return -1 + } + return enforce +} + +func SelinuxGetEnforceMode() int { + switch readConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) error { + if mcsList[mcs] { + return fmt.Errorf("MCS Label already exists") + } + mcsList[mcs] = true + return nil +} + +func mcsDelete(mcs string) { + mcsList[mcs] = false +} + +func mcsExists(mcs string) bool { + return mcsList[mcs] +} + +func IntToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD = ORD - TIER + TIER -= 1 + } + TIER = SETSIZE - TIER + ORD = ORD + TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + binary.Read(rand.Reader, binary.LittleEndian, &n) + c2 = n 
% catRange + if c1 == c2 { + continue + } else { + if c1 > c2 { + t := c1 + c1 = c2 + c2 = t + } + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if err := mcsAdd(mcs); err != nil { + continue + } + break + } + return mcs +} + +func FreeLxcContexts(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsDelete(con[3]) + } +} + +func GetLxcContexts() (processLabel string, fileLabel string) { + var ( + val, key string + bufin *bufio.Reader + ) + + if !SelinuxEnabled() { + return "", "" + } + lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot()) + in, err := os.Open(lxcPath) + if err != nil { + return "", "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + goto exit + } + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == "process" { + processLabel = strings.Trim(val, "\"") + } + if key == "file" { + fileLabel = strings.Trim(val, "\"") + } + } + } + + if processLabel == "" || fileLabel == "" { + return "", "" + } + +exit: + // mcs := IntToMcs(os.Getpid(), 1024) + mcs := uniqMcs(1024) + scon := NewContext(processLabel) + scon["level"] = mcs + processLabel = scon.Get() + scon = NewContext(fileLabel) + scon["level"] = mcs + fileLabel = scon.Get() + return processLabel, fileLabel +} + +func SecurityCheckContext(val string) error { + return writeCon(fmt.Sprintf("%s.context", selinuxPath), val) +} + +func CopyLevel(src, dest string) (string, error) { + if src == "" { + return "", nil + } + if err := SecurityCheckContext(src); err != nil { + return "", err + } + if err := SecurityCheckContext(dest); err != nil { + return "", err + } + scon := NewContext(src) + tcon := NewContext(dest) + mcsDelete(tcon["level"]) + mcsAdd(scon["level"]) + tcon["level"] = scon["level"] + return tcon.Get(), nil +} + +// Prevent users from relabing system files +func badPrefix(fpath string) error { + var badprefixes = []string{"/usr"} + + for _, prefix := range badprefixes { + if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) { + return fmt.Errorf("Relabeling content in %s is not allowed.", prefix) + } + } + return nil +} + +// Change the fpath file object to the SELinux label scon. 
+// If the fpath is a directory and recurse is true Chcon will walk the +// directory tree setting the label +func Chcon(fpath string, scon string, recurse bool) error { + if scon == "" { + return nil + } + if err := badPrefix(fpath); err != nil { + return err + } + callback := func(p string, info os.FileInfo, err error) error { + return Setfilecon(p, scon) + } + + if recurse { + return filepath.Walk(fpath, callback) + } + + return Setfilecon(fpath, scon) +} + +// DupSecOpt takes an SELinux process label and returns security options that +// can will set the SELinux Type and Level for future container processes +func DupSecOpt(src string) []string { + if src == "" { + return nil + } + con := NewContext(src) + if con["user"] == "" || + con["role"] == "" || + con["type"] == "" || + con["level"] == "" { + return nil + } + return []string{"label:user:" + con["user"], + "label:role:" + con["role"], + "label:type:" + con["type"], + "label:level:" + con["level"]} +} + +// DisableSecOpt returns a security opt that can be used to disabling SELinux +// labeling support for future container processes +func DisableSecOpt() []string { + return []string{"label:disable"} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go new file mode 100644 index 00000000000..228ad8361c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/selinux/selinux_test.go @@ -0,0 +1,64 @@ +// +build linux + +package selinux_test + +import ( + "os" + "testing" + + "github.com/docker/libcontainer/selinux" +) + +func testSetfilecon(t *testing.T) { + if selinux.SelinuxEnabled() { + tmp := "selinux_test" + out, _ := os.OpenFile(tmp, os.O_WRONLY, 0) + out.Close() + err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0") + if err != nil { + t.Log("Setfilecon failed") + t.Fatal(err) + } + os.Remove(tmp) + } +} + +func TestSELinux(t *testing.T) { + var ( + err error + plabel, flabel string + ) + + if selinux.SelinuxEnabled() { + t.Log("Enabled") + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + t.Log("getenforce ", selinux.SelinuxGetEnforce()) + t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) + pid := os.Getpid() + t.Logf("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) + err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + err = selinux.Setfscreatecon("") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + t.Log(selinux.Getpidcon(1)) + } else { + t.Log("Disabled") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/state.go b/Godeps/_workspace/src/github.com/docker/libcontainer/state.go new file mode 100644 index 00000000000..208b4c62762 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/state.go @@ -0,0 +1,77 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/docker/libcontainer/network" +) + +// State represents a running container's state +type State struct { + // InitPid is the init process id in the parent namespace + InitPid int `json:"init_pid,omitempty"` + + // InitStartTime is the init process start time + 
InitStartTime string `json:"init_start_time,omitempty"` + + // Network runtime state. + NetworkState network.NetworkState `json:"network_state,omitempty"` + + // Path to all the cgroups setup for a container. Key is cgroup subsystem name. + CgroupPaths map[string]string `json:"cgroup_paths,omitempty"` +} + +// The running state of the container. +type RunState int + +const ( + // The name of the runtime state file + stateFile = "state.json" + + // The container exists and is running. + Running RunState = iota + + // The container exists, it is in the process of being paused. + Pausing + + // The container exists, but all its processes are paused. + Paused + + // The container does not exist. + Destroyed +) + +// SaveState writes the container's runtime state to a state.json file +// in the specified path +func SaveState(basePath string, state *State) error { + f, err := os.Create(filepath.Join(basePath, stateFile)) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(state) +} + +// GetState reads the state.json file for a running container +func GetState(basePath string) (*State, error) { + f, err := os.Open(filepath.Join(basePath, stateFile)) + if err != nil { + return nil, err + } + defer f.Close() + + var state *State + if err := json.NewDecoder(f).Decode(&state); err != nil { + return nil, err + } + + return state, nil +} + +// DeleteState deletes the state.json file +func DeleteState(basePath string) error { + return os.Remove(filepath.Join(basePath, stateFile)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go new file mode 100644 index 00000000000..c07ef1532dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/linux.go @@ -0,0 +1,60 @@ +// +build linux + +package system + +import ( + "os/exec" + "syscall" + "unsafe" +) + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + + return syscall.Exec(name, args, env) +} + +func ParentDeathSignal(sig uintptr) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + return err + } + return nil +} + +func GetParentDeathSignal() (int, error) { + var sig int + + _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) + + if err != 0 { + return -1, err + } + + return sig, nil +} + +func SetKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + return err + } + + return nil +} + +func ClearKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + return err + } + + return nil +} + +func Setctty() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go new file mode 100644 index 00000000000..37808a29f6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/proc.go @@ -0,0 +1,27 @@ +package system + +import ( + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// look in /proc to find the process start time so that we can verify +// that this pid has started after ourself +func 
GetProcessStartTime(pid int) (string, error) { + data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return "", err + } + + parts := strings.Split(string(data), " ") + // the starttime is located at pos 22 + // from the man page + // + // starttime %llu (was %lu before Linux 2.6) + // (22) The time the process started after system boot. In kernels before Linux 2.6, this + // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks + // (divide by sysconf(_SC_CLK_TCK)). + return parts[22-1], nil // starts at 1 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go new file mode 100644 index 00000000000..228e6ccd7f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/setns_linux.go @@ -0,0 +1,34 @@ +package system + +import ( + "fmt" + "runtime" + "syscall" +) + +// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 +// +// We need different setns values for the different platforms and arch +// We are declaring the macro here because the SETNS syscall does not exist in th stdlib +var setNsMap = map[string]uintptr{ + "linux/386": 346, + "linux/amd64": 308, + "linux/arm": 374, + "linux/ppc64": 350, + "linux/ppc64le": 350, + "linux/s390x": 339, +} + +func Setns(fd uintptr, flags uintptr) error { + ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] + if !exists { + return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + } + + _, _, err := syscall.RawSyscall(ns, fd, flags, 0) + if err != 0 { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_386.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_386.go new file mode 100644 index 00000000000..2fcbf21309e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_386.go @@ -0,0 +1,24 @@ +// +build linux,386 +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_64.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_64.go new file mode 100644 index 00000000000..6840c3770f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_64.go @@ -0,0 +1,25 @@ +// +build linux,amd64 linux,ppc64 linux,ppc64le linux,s390x + +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. 
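Alongside the syscall wrappers being vendored here, the `state.go` helpers shown earlier (`SaveState`, `GetState`, `DeleteState`) give a small JSON round-trip API for a container's runtime state. The sketch below is illustrative only and uses a hypothetical, pre-existing base directory; the 64-bit `Setgid` implementation documented above continues after it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libcontainer"
)

func main() {
	base := "/tmp/container-demo" // hypothetical, pre-existing directory

	// Write state.json under base.
	if err := libcontainer.SaveState(base, &libcontainer.State{InitPid: 1234}); err != nil {
		log.Fatal(err)
	}

	// Read it back and inspect the init pid.
	st, err := libcontainer.GetState(base)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("init pid:", st.InitPid)

	// Remove state.json once the container is gone.
	if err := libcontainer.DeleteState(base); err != nil {
		log.Fatal(err)
	}
}
```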
+func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_arm.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_arm.go new file mode 100644 index 00000000000..7d8cda9d009 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/syscall_linux_arm.go @@ -0,0 +1,24 @@ +// +build linux,arm +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go new file mode 100644 index 00000000000..5efddefa79f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig.go @@ -0,0 +1,12 @@ +// +build cgo + +package system + +/* +#include +*/ +import "C" + +func GetClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go new file mode 100644 index 00000000000..663db82bce8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go @@ -0,0 +1,8 @@ +// +build !cgo + +package system + +func GetClockTicks() int { + // TODO figure out a better alternative for platforms where we're missing cgo + return 100 +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go new file mode 100644 index 00000000000..30f74dfb1b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/system/xattrs_linux.go @@ -0,0 +1,99 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var _zero uintptr + +// Returns the size of xattrs and nil error +// Requires path, takes allocated []byte or nil as last argument +func Llistxattr(path string, dest []byte) (size int, err error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return -1, err + } + var newpathBytes unsafe.Pointer + if len(dest) > 0 { + newpathBytes = unsafe.Pointer(&dest[0]) + } else { + newpathBytes = unsafe.Pointer(&_zero) + } + + _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) + size = int(_size) + if errno != 0 { + return -1, errno + } + + return size, nil +} + +// Returns a []byte slice if the xattr is set and nil otherwise +// Requires path and its attribute as arguments +func Lgetxattr(path string, attr string) ([]byte, error) { + var sz int + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + // Start with a 128 length byte array + sz = 128 + dest := make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno := 
syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + + switch { + case errno == syscall.ENODATA: + return nil, errno + case errno == syscall.ENOTSUP: + return nil, errno + case errno == syscall.ERANGE: + // 128 byte array might just not be good enough, + // A dummy buffer is used ``uintptr(0)`` to get real size + // of the xattrs on disk + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) + sz = int(_sz) + if sz < 0 { + return nil, errno + } + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno != 0 { + return nil, errno + } + case errno != 0: + return nil, errno + } + sz = int(_sz) + return dest[:sz], nil +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/types.go b/Godeps/_workspace/src/github.com/docker/libcontainer/types.go new file mode 100644 index 00000000000..c341137ec8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/types.go @@ -0,0 +1,11 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/network" +) + +type ContainerStats struct { + NetworkStats *network.NetworkStats `json:"network_stats,omitempty"` + CgroupStats *cgroups.Stats `json:"cgroup_stats,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/update-vendor.sh b/Godeps/_workspace/src/github.com/docker/libcontainer/update-vendor.sh new file mode 100644 index 00000000000..df66a0a8d56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/update-vendor.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$BASH_SOURCE")" + +# Downloads dependencies into vendor/ directory +mkdir -p vendor +cd vendor + +clone() { + vcs=$1 + pkg=$2 + rev=$3 + + pkg_url=https://$pkg + target_dir=src/$pkg + + echo -n "$pkg @ $rev: " + + if [ -d $target_dir ]; then + echo -n 'rm old, ' + rm -fr $target_dir + fi + + echo -n 'clone, ' + case $vcs in + git) + git clone --quiet --no-checkout $pkg_url $target_dir + ( cd $target_dir && git reset --quiet --hard $rev ) + ;; + hg) + hg clone --quiet --updaterev $rev $pkg_url $target_dir + ;; + esac + + echo -n 'rm VCS, ' + ( cd $target_dir && rm -rf .{git,hg} ) + + echo done +} + +# the following lines are in sorted order, FYI +clone git github.com/codegangsta/cli 1.1.0 +clone git github.com/coreos/go-systemd v2 +clone git github.com/godbus/dbus v1 +clone git github.com/syndtr/gocapability 3c85049eae + +# intentionally not vendoring Docker itself... 
that'd be a circle :) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libcontainer/user/MAINTAINERS new file mode 100644 index 00000000000..edbe2006694 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup.go new file mode 100644 index 00000000000..6f8a982ff72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. 
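The lookup helpers in the vendored `user` package parse the passwd- and group-formatted files directly rather than going through the standard library's cgo-backed lookups. A brief illustrative sketch follows; it is not part of the vendored files and assumes a Unix host with the usual /etc/passwd and /etc/group. The `LookupGroup` and `LookupGid` implementations documented above continue below it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libcontainer/user"
)

func main() {
	// Look up a user by name in /etc/passwd.
	u, err := user.LookupUser("root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("root: uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)

	// Look up a group by numeric gid in /etc/group.
	g, err := user.LookupGid(0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("gid 0 is group:", g.Name)
}
```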
+func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unix.go new file mode 100644 index 00000000000..758b734c225 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unsupported.go new file mode 100644 index 00000000000..7217948870c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdPath() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupPath() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go new file mode 100644 index 00000000000..d7439f12e39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/user.go @@ -0,0 +1,350 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, 
not a runtime one + panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. 
+// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( + userArg, groupArg string + name string + ) + + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + return u.Uid == user.Uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // we asked for a user but didn't find them... let's check to see if we wanted a numeric user + user.Uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + + // Check if user is a member. + for _, u := range g.List { + if u == name { + return true + } + } + + return false + }) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + user.Gid = groups[0].Gid + } else { + // we asked for a group but didn't find id... let's check to see if we wanted a numeric group + user.Gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + // If implicit group format, fill supplementary gids. 
+ user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go new file mode 100644 index 00000000000..4fe008fb397 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/user/user_test.go @@ -0,0 +1,352 @@ +package user + +import ( + "io" + "reflect" + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a != "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) + } +} + +func TestUserParsePasswd(t *testing.T) { + users, err := ParsePasswdFilter(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := ParseGroupFilter(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: 
[]int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + }, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. + "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. 
+ "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go new file mode 100644 index 00000000000..76184ce00b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "crypto/rand" + "encoding/hex" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "syscall" +) + +// GenerateRandomName returns a new name joined with a prefix. 
This size +// specified is used to truncate the randomly generated value +func GenerateRandomName(prefix string, size int) (string, error) { + id := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, id); err != nil { + return "", err + } + return prefix + hex.EncodeToString(id)[:size], nil +} + +// ResolveRootfs ensures that the current working directory is +// not a symlink and returns the absolute path to the rootfs +func ResolveRootfs(uncleanRootfs string) (string, error) { + rootfs, err := filepath.Abs(uncleanRootfs) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(rootfs) +} + +func CloseExecFrom(minFd int) error { + fdList, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return err + } + for _, fi := range fdList { + fd, err := strconv.Atoi(fi.Name()) + if err != nil { + // ignore non-numeric file names + continue + } + + if fd < minFd { + // ignore descriptors lower than our specified minimum + continue + } + + // intentionally ignore errors from syscall.CloseOnExec + syscall.CloseOnExec(fd) + // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils_test.go new file mode 100644 index 00000000000..41ef1aa3df5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/utils/utils_test.go @@ -0,0 +1,15 @@ +package utils + +import "testing" + +func TestGenerateName(t *testing.T) { + name, err := GenerateRandomName("veth", 5) + if err != nil { + t.Fatal(err) + } + + expected := 5 + len("veth") + if len(name) != 5+len("veth") { + t.Fatalf("expected name to be %d chars but received %d", expected, len(name)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/.travis.yml b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/.travis.yml new file mode 100644 index 00000000000..baf46abc6f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: 1.1 + +script: +- go vet ./... +- go test -v ./... diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/LICENSE b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/LICENSE new file mode 100644 index 00000000000..5515ccfb716 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2013 Jeremy Saenz +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/README.md new file mode 100644 index 00000000000..59806f4b90d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/README.md @@ -0,0 +1,257 @@ +[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) + +# cli.go +cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. + +You can view the API docs here: +http://godoc.org/github.com/codegangsta/cli + +## Overview +Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. + +This is where cli.go comes into play. cli.go makes command line programming fun, organized, and expressive! + +## Installation +Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html). + +To install cli.go, simply run: +``` +$ go get github.com/codegangsta/cli +``` + +Make sure your PATH includes to the `$GOPATH/bin` directory so your commands can be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +## Getting Started +One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`. + +``` go +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: + +``` go +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) { + println("boom! I say!") + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below. + +## Example + +Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! + +``` go +/* greet.go */ +package main + +import ( + "os" + "github.com/codegangsta/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) { + println("Hello friend!") + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! 
+``` + +cli.go also generates some bitchass help text: +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] + +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments +You can lookup arguments by calling the `Args` function on cli.Context. + +``` go +... +app.Action = func(c *cli.Context) { + println("Hello", c.Args()[0]) +} +... +``` + +### Flags +Setting and querying flags is simple. +``` go +... +app.Flags = []cli.Flag { + cli.StringFlag{Name: "lang", Value: "english", Usage: "language for the greeting"}, +} +app.Action = func(c *cli.Context) { + name := "someone" + if len(c.Args()) > 0 { + name = c.Args()[0] + } + if c.String("lang") == "spanish" { + println("Hola", name) + } else { + println("Hello", name) + } +} +... +``` + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited list for the Name. e.g. + +``` go +app.Flags = []cli.Flag { + cli.StringFlag{Name: "lang, l", Value: "english", Usage: "language for the greeting"}, +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. + +### Subcommands + +Subcommands can be defined for a more git-like command line app. +```go +... +app.Commands = []cli.Command{ + { + Name: "add", + ShortName: "a", + Usage: "add a task to the list", + Action: func(c *cli.Context) { + println("added task: ", c.Args().First()) + }, + }, + { + Name: "complete", + ShortName: "c", + Usage: "complete a task on the list", + Action: func(c *cli.Context) { + println("completed task: ", c.Args().First()) + }, + }, + { + Name: "template", + ShortName: "r", + Usage: "options for task templates", + Subcommands: []cli.Command{ + { + Name: "add", + Usage: "add a new template", + Action: func(c *cli.Context) { + println("new task template: ", c.Args().First()) + }, + }, + { + Name: "remove", + Usage: "remove an existing template", + Action: func(c *cli.Context) { + println("removed task template: ", c.Args().First()) + }, + }, + }, + }, +} +... +``` + +### Bash Completion + +You can enable completion commands by setting the EnableBashCompletion +flag on the App object. By default, this setting will only auto-complete to +show an app's subcommands, but you can write your own completion methods for +the App or its subcommands. +```go +... +var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"} +app := cli.NewApp() +app.EnableBashCompletion = true +app.Commands = []cli.Command{ + { + Name: "complete", + ShortName: "c", + Usage: "complete a task on the list", + Action: func(c *cli.Context) { + println("completed task: ", c.Args().First()) + }, + BashComplete: func(c *cli.Context) { + // This will complete if no args are passed + if len(c.Args()) > 0 { + return + } + for _, t := range tasks { + println(t) + } + }, + } +} +... 
+``` + +#### To Enable + +Source the autocomplete/bash_autocomplete file in your .bashrc file while +setting the PROG variable to the name of your program: + +`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` + + +## About +cli.go is written by none other than the [Code Gangsta](http://codegangsta.io) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app.go new file mode 100644 index 00000000000..e193b8282eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app.go @@ -0,0 +1,248 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "os" + "time" +) + +// App is the main structure of a cli application. It is recomended that +// and app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to os.Args[0] + Name string + // Description of the program. + Usage string + // Version of the program + Version string + // List of commands to execute + Commands []Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command + HideHelp bool + // An action to execute when the bash-completion flag is set + BashComplete func(context *Context) + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before func(context *Context) error + // The action to execute when no subcommands are specified + Action func(context *Context) + // Execute this function if the proper command cannot be found + CommandNotFound func(context *Context, command string) + // Compilation date + Compiled time.Time + // Author + Author string + // Author e-mail + Email string +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: os.Args[0], + Usage: "A new cli application", + Version: "0.0.0", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Author: "Author", + Email: "unknown@email", + } +} + +// Entry point to the cli app. 
Parses the arguments slice and routes to the proper flag/args combination +func (a *App) Run(arguments []string) error { + // append help to commands + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + a.appendFlag(HelpFlag) + } + + //append version/help flags + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + a.appendFlag(VersionFlag) + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err := set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + if nerr != nil { + fmt.Println(nerr) + context := NewContext(a, set, set) + ShowAppHelp(context) + fmt.Println("") + return nerr + } + context := NewContext(a, set, set) + + if err != nil { + fmt.Printf("Incorrect Usage.\n\n") + ShowAppHelp(context) + fmt.Println("") + return err + } + + if checkCompletions(context) { + return nil + } + + if checkHelp(context) { + return nil + } + + if checkVersion(context) { + return nil + } + + if a.Before != nil { + err := a.Before(context) + if err != nil { + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + a.Action(context) + return nil +} + +// Another entry point to the cli app, takes care of passing arguments and error handling +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + os.Stderr.WriteString(fmt.Sprintln(err)) + os.Exit(1) + } +} + +// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) error { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + a.appendFlag(HelpFlag) + } + } + + // append flags + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err := set.Parse(ctx.Args().Tail()) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx.globalSet) + + if nerr != nil { + fmt.Println(nerr) + if len(a.Commands) > 0 { + ShowSubcommandHelp(context) + } else { + ShowCommandHelp(ctx, context.Args().First()) + } + fmt.Println("") + return nerr + } + + if err != nil { + fmt.Printf("Incorrect Usage.\n\n") + ShowSubcommandHelp(context) + return err + } + + if checkCompletions(context) { + return nil + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + if a.Before != nil { + err := a.Before(context) + if err != nil { + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + if len(a.Commands) > 0 { + a.Action(context) + } else { + a.Action(ctx) + } + + return nil +} + +// Returns the named command on App. 
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app_test.go new file mode 100644 index 00000000000..a9156241032 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/app_test.go @@ -0,0 +1,399 @@ +package cli_test + +import ( + "fmt" + "os" + "testing" + + "github.com/codegangsta/cli" +) + +func ExampleApp() { + // set args for examples sake + os.Args = []string{"greet", "--name", "Jeremy"} + + app := cli.NewApp() + app.Name = "greet" + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + } + app.Action = func(c *cli.Context) { + fmt.Printf("Hello %v\n", c.String("name")) + } + app.Run(os.Args) + // Output: + // Hello Jeremy +} + +func ExampleAppSubcommand() { + // set args for examples sake + os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} + app := cli.NewApp() + app.Name = "say" + app.Commands = []cli.Command{ + { + Name: "hello", + ShortName: "hi", + Usage: "use it to see a description", + Description: "This is how we describe hello the function", + Subcommands: []cli.Command{ + { + Name: "english", + ShortName: "en", + Usage: "sends a greeting in english", + Description: "greets someone in english", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name", Value: "Bob", Usage: "Name of the person to greet"}, + }, + Action: func(c *cli.Context) { + fmt.Println("Hello,", c.String("name")) + }, + }, + }, + }, + } + + app.Run(os.Args) + // Output: + // Hello, Jeremy +} + +func ExampleAppHelp() { + // set args for examples sake + os.Args = []string{"greet", "h", "describeit"} + + app := cli.NewApp() + app.Name = "greet" + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + } + app.Commands = []cli.Command{ + { + Name: "describeit", + ShortName: "d", + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + Action: func(c *cli.Context) { + fmt.Printf("i like to describe things") + }, + }, + } + app.Run(os.Args) + // Output: + // NAME: + // describeit - use it to see a description + // + // USAGE: + // command describeit [arguments...] 
+ // + // DESCRIPTION: + // This is how we describe describeit the function +} + +func ExampleAppBashComplete() { + // set args for examples sake + os.Args = []string{"greet", "--generate-bash-completion"} + + app := cli.NewApp() + app.Name = "greet" + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "describeit", + ShortName: "d", + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + Action: func(c *cli.Context) { + fmt.Printf("i like to describe things") + }, + }, { + Name: "next", + Usage: "next example", + Description: "more stuff to see when generating bash completion", + Action: func(c *cli.Context) { + fmt.Printf("the next example") + }, + }, + } + + app.Run(os.Args) + // Output: + // describeit + // d + // next + // help + // h +} + +func TestApp_Run(t *testing.T) { + s := "" + + app := cli.NewApp() + app.Action = func(c *cli.Context) { + s = s + c.Args().First() + } + + err := app.Run([]string{"command", "foo"}) + expect(t, err, nil) + err = app.Run([]string{"command", "bar"}) + expect(t, err, nil) + expect(t, s, "foobar") +} + +var commandAppTests = []struct { + name string + expected bool +}{ + {"foobar", true}, + {"batbaz", true}, + {"b", true}, + {"f", true}, + {"bat", false}, + {"nothing", false}, +} + +func TestApp_Command(t *testing.T) { + app := cli.NewApp() + fooCommand := cli.Command{Name: "foobar", ShortName: "f"} + batCommand := cli.Command{Name: "batbaz", ShortName: "b"} + app.Commands = []cli.Command{ + fooCommand, + batCommand, + } + + for _, test := range commandAppTests { + expect(t, app.Command(test.name) != nil, test.expected) + } +} + +func TestApp_CommandWithArgBeforeFlags(t *testing.T) { + var parsedOption, firstArg string + + app := cli.NewApp() + command := cli.Command{ + Name: "cmd", + Flags: []cli.Flag{ + cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, + }, + Action: func(c *cli.Context) { + parsedOption = c.String("option") + firstArg = c.Args().First() + }, + } + app.Commands = []cli.Command{command} + + app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) + + expect(t, parsedOption, "my-option") + expect(t, firstArg, "my-arg") +} + +func TestApp_Float64Flag(t *testing.T) { + var meters float64 + + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, + } + app.Action = func(c *cli.Context) { + meters = c.Float64("height") + } + + app.Run([]string{"", "--height", "1.93"}) + expect(t, meters, 1.93) +} + +func TestApp_ParseSliceFlags(t *testing.T) { + var parsedOption, firstArg string + var parsedIntSlice []int + var parsedStringSlice []string + + app := cli.NewApp() + command := cli.Command{ + Name: "cmd", + Flags: []cli.Flag{ + cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, + cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, + }, + Action: func(c *cli.Context) { + parsedIntSlice = c.IntSlice("p") + parsedStringSlice = c.StringSlice("ip") + parsedOption = c.String("option") + firstArg = c.Args().First() + }, + } + app.Commands = []cli.Command{command} + + app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) + + IntsEquals := func(a, b []int) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true + } + + StrsEquals := func(a, b []string) bool { + if len(a) != len(b) { + 
return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true + } + var expectedIntSlice = []int{22, 80} + var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"} + + if !IntsEquals(parsedIntSlice, expectedIntSlice) { + t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice) + } + + if !StrsEquals(parsedStringSlice, expectedStringSlice) { + t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice) + } +} + +func TestApp_BeforeFunc(t *testing.T) { + beforeRun, subcommandRun := false, false + beforeError := fmt.Errorf("fail") + var err error + + app := cli.NewApp() + + app.Before = func(c *cli.Context) error { + beforeRun = true + s := c.String("opt") + if s == "fail" { + return beforeError + } + + return nil + } + + app.Commands = []cli.Command{ + cli.Command{ + Name: "sub", + Action: func(c *cli.Context) { + subcommandRun = true + }, + }, + } + + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "opt"}, + } + + // run with the Before() func succeeding + err = app.Run([]string{"command", "--opt", "succeed", "sub"}) + + if err != nil { + t.Fatalf("Run error: %s", err) + } + + if beforeRun == false { + t.Errorf("Before() not executed when expected") + } + + if subcommandRun == false { + t.Errorf("Subcommand not executed when expected") + } + + // reset + beforeRun, subcommandRun = false, false + + // run with the Before() func failing + err = app.Run([]string{"command", "--opt", "fail", "sub"}) + + // should be the same error produced by the Before func + if err != beforeError { + t.Errorf("Run error expected, but not received") + } + + if beforeRun == false { + t.Errorf("Before() not executed when expected") + } + + if subcommandRun == true { + t.Errorf("Subcommand executed when NOT expected") + } + +} + +func TestAppHelpPrinter(t *testing.T) { + oldPrinter := cli.HelpPrinter + defer func() { + cli.HelpPrinter = oldPrinter + }() + + var wasCalled = false + cli.HelpPrinter = func(template string, data interface{}) { + wasCalled = true + } + + app := cli.NewApp() + app.Run([]string{"-h"}) + + if wasCalled == false { + t.Errorf("Help printer expected to be called, but was not") + } +} + +func TestAppCommandNotFound(t *testing.T) { + beforeRun, subcommandRun := false, false + app := cli.NewApp() + + app.CommandNotFound = func(c *cli.Context, command string) { + beforeRun = true + } + + app.Commands = []cli.Command{ + cli.Command{ + Name: "bar", + Action: func(c *cli.Context) { + subcommandRun = true + }, + }, + } + + app.Run([]string{"command", "foo"}) + + expect(t, beforeRun, true) + expect(t, subcommandRun, false) +} + +func TestGlobalFlagsInSubcommands(t *testing.T) { + subcommandRun := false + app := cli.NewApp() + + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, + } + + app.Commands = []cli.Command{ + cli.Command{ + Name: "foo", + Subcommands: []cli.Command{ + { + Name: "bar", + Action: func(c *cli.Context) { + if c.GlobalBool("debug") { + subcommandRun = true + } + }, + }, + }, + }, + } + + app.Run([]string{"command", "-d", "foo", "bar"}) + + expect(t, subcommandRun, true) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete new file mode 100644 index 00000000000..a860e038d86 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete @@ -0,0 +1,13 @@ +#! /bin/bash + +_cli_bash_autocomplete() { + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + opts=$( ${COMP_WORDS[@]:0:COMP_CWORD} --generate-bash-completion ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + } + + complete -F _cli_bash_autocomplete $PROG \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli.go new file mode 100644 index 00000000000..b7425458123 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli.go @@ -0,0 +1,19 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// cli.NewApp().Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := cli.NewApp() +// app.Name = "greet" +// app.Usage = "say a greeting" +// app.Action = func(c *cli.Context) { +// println("Greetings") +// } +// +// app.Run(os.Args) +// } +package cli diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli_test.go new file mode 100644 index 00000000000..4d7bd8479a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/cli_test.go @@ -0,0 +1,88 @@ +package cli_test + +import ( + "os" + + "github.com/codegangsta/cli" +) + +func Example() { + app := cli.NewApp() + app.Name = "todo" + app.Usage = "task list on the command line" + app.Commands = []cli.Command{ + { + Name: "add", + ShortName: "a", + Usage: "add a task to the list", + Action: func(c *cli.Context) { + println("added task: ", c.Args().First()) + }, + }, + { + Name: "complete", + ShortName: "c", + Usage: "complete a task on the list", + Action: func(c *cli.Context) { + println("completed task: ", c.Args().First()) + }, + }, + } + + app.Run(os.Args) +} + +func ExampleSubcommand() { + app := cli.NewApp() + app.Name = "say" + app.Commands = []cli.Command{ + { + Name: "hello", + ShortName: "hi", + Usage: "use it to see a description", + Description: "This is how we describe hello the function", + Subcommands: []cli.Command{ + { + Name: "english", + ShortName: "en", + Usage: "sends a greeting in english", + Description: "greets someone in english", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name", Value: "Bob", Usage: "Name of the person to greet"}, + }, + Action: func(c *cli.Context) { + println("Hello, ", c.String("name")) + }, + }, { + Name: "spanish", + ShortName: "sp", + Usage: "sends a greeting in spanish", + Flags: []cli.Flag{ + cli.StringFlag{Name: "surname", Value: "Jones", Usage: "Surname of the person to greet"}, + }, + Action: func(c *cli.Context) { + println("Hola, ", c.String("surname")) + }, + }, { + Name: "french", + ShortName: "fr", + Usage: "sends a greeting in french", + Flags: []cli.Flag{ + cli.StringFlag{Name: "nickname", Value: "Stevie", Usage: "Nickname of the person to greet"}, + }, + 
Action: func(c *cli.Context) { + println("Bonjour, ", c.String("nickname")) + }, + }, + }, + }, { + Name: "bye", + Usage: "says goodbye", + Action: func(c *cli.Context) { + println("bye") + }, + }, + } + + app.Run(os.Args) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command.go new file mode 100644 index 00000000000..dcc8de5c9a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command.go @@ -0,0 +1,141 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // short name of the command. Typically one character + ShortName string + // A short description of the usage of this command + Usage string + // A longer explanation of how the command works + Description string + // The function to call when checking for bash command completions + BashComplete func(context *Context) + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before func(context *Context) error + // The function to call when this command is invoked + Action func(context *Context) + // List of child commands + Subcommands []Command + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command + HideHelp bool +} + +// Invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) error { + + if len(c.Subcommands) > 0 || c.Before != nil { + return c.startApp(ctx) + } + + if !c.HideHelp { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + if ctx.App.EnableBashCompletion { + c.Flags = append(c.Flags, BashCompletionFlag) + } + + set := flagSet(c.Name, c.Flags) + set.SetOutput(ioutil.Discard) + + firstFlagIndex := -1 + for index, arg := range ctx.Args() { + if strings.HasPrefix(arg, "-") { + firstFlagIndex = index + break + } + } + + var err error + if firstFlagIndex > -1 && !c.SkipFlagParsing { + args := ctx.Args() + regularArgs := args[1:firstFlagIndex] + flagArgs := args[firstFlagIndex:] + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + + if err != nil { + fmt.Printf("Incorrect Usage.\n\n") + ShowCommandHelp(ctx, c.Name) + fmt.Println("") + return err + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Println(nerr) + fmt.Println("") + ShowCommandHelp(ctx, c.Name) + fmt.Println("") + return nerr + } + context := NewContext(ctx.App, set, ctx.globalSet) + + if checkCommandCompletions(context, c.Name) { + return nil + } + + if checkCommandHelp(context, c.Name) { + return nil + } + context.Command = c + c.Action(context) + return nil +} + +// Returns true if Command.Name or Command.ShortName matches given name +func (c Command) HasName(name string) bool { + return c.Name == name || c.ShortName == name +} + +func (c Command) startApp(ctx *Context) error { + app := NewApp() + + // set the name and usage + app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.Description != "" { + app.Usage = c.Description + } else { + app.Usage = c.Usage + } + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = 
c.Flags + app.HideHelp = c.HideHelp + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + + return app.RunAsSubcommand(ctx) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command_test.go new file mode 100644 index 00000000000..3afd83e7a67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/command_test.go @@ -0,0 +1,48 @@ +package cli_test + +import ( + "flag" + "github.com/codegangsta/cli" + "testing" +) + +func TestCommandDoNotIgnoreFlags(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + test := []string{"blah", "blah", "-break"} + set.Parse(test) + + c := cli.NewContext(app, set, set) + + command := cli.Command { + Name: "test-cmd", + ShortName: "tc", + Usage: "this is for testing", + Description: "testing", + Action: func(_ *cli.Context) { }, + } + err := command.Run(c) + + expect(t, err.Error(), "flag provided but not defined: -break") +} + +func TestCommandIgnoreFlags(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + test := []string{"blah", "blah"} + set.Parse(test) + + c := cli.NewContext(app, set, set) + + command := cli.Command { + Name: "test-cmd", + ShortName: "tc", + Usage: "this is for testing", + Description: "testing", + Action: func(_ *cli.Context) { }, + SkipFlagParsing: true, + } + err := command.Run(c) + + expect(t, err, nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context.go new file mode 100644 index 00000000000..1e023ceff95 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context.go @@ -0,0 +1,280 @@ +package cli + +import ( + "errors" + "flag" + "strconv" + "strings" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific Args and +// parsed command-line options. +type Context struct { + App *App + Command Command + flagSet *flag.FlagSet + globalSet *flag.FlagSet + setFlags map[string]bool +} + +// Creates a new context. For use in when invoking an App or Command action. 
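+// A minimal sketch of building one by hand, mirroring this package's own tests
+// rather than going through App.Run; the tests pass nil for the App when only
+// flag lookups are exercised:
+//
+//	set := flag.NewFlagSet("test", 0)
+//	set.Int("myflag", 12, "doc")
+//	c := NewContext(nil, set, set)
+//	// c.Int("myflag") == 12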
+func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { + return &Context{App: app, flagSet: set, globalSet: globalSet} +} + +// Looks up the value of a local int flag, returns 0 if no int flag exists +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// Looks up the value of a local bool flag, returns false if no bool flag exists +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// Looks up the value of a local boolT flag, returns false if no bool flag exists +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// Looks up the value of a local string flag, returns "" if no string flag exists +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// Looks up the value of a local string slice flag, returns nil if no string slice flag exists +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// Looks up the value of a local int slice flag, returns nil if no int slice flag exists +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// Looks up the value of a local generic flag, returns nil if no generic flag exists +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// Looks up the value of a global int flag, returns 0 if no int flag exists +func (c *Context) GlobalInt(name string) int { + return lookupInt(name, c.globalSet) +} + +// Looks up the value of a global bool flag, returns false if no bool flag exists +func (c *Context) GlobalBool(name string) bool { + return lookupBool(name, c.globalSet) +} + +// Looks up the value of a global string flag, returns "" if no string flag exists +func (c *Context) GlobalString(name string) string { + return lookupString(name, c.globalSet) +} + +// Looks up the value of a global string slice flag, returns nil if no string slice flag exists +func (c *Context) GlobalStringSlice(name string) []string { + return lookupStringSlice(name, c.globalSet) +} + +// Looks up the value of a global int slice flag, returns nil if no int slice flag exists +func (c *Context) GlobalIntSlice(name string) []int { + return lookupIntSlice(name, c.globalSet) +} + +// Looks up the value of a global generic flag, returns nil if no generic flag exists +func (c *Context) GlobalGeneric(name string) interface{} { + return lookupGeneric(name, c.globalSet) +} + +// Determines if the flag was actually set exists +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + } + return c.setFlags[name] == true +} + +type Args []string + +// Returns the command line arguments associated with the context. 
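+// The helpers defined on Args below cover the common cases; a short sketch:
+//
+//	args := c.Args()
+//	_ = args.First()   // "" when nothing remains after flag parsing
+//	_ = args.Tail()    // empty slice when there are fewer than two arguments
+//	_ = args.Present() // true only if at least one argument is present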
+func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// Returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// Returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Return the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + val, err := strconv.Atoi(f.Value.String()) + if err != nil { + return 0 + } + return val + } + + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + val, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return val + } + + return 0 +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + return f.Value.String() + } + + return "" +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*StringSlice)).Value() + + } + + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*IntSlice)).Value() + + } + + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + return f.Value + } + return nil +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + val, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return val + } + + return false +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + val, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return true + } + return val + } + + return false +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.getName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context_test.go new file mode 100644 index 00000000000..89041b99d40 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/context_test.go @@ -0,0 +1,68 @@ +package cli_test + +import ( + "flag" + "github.com/codegangsta/cli" + "testing" +) + +func TestNewContext(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("myflag", 12, "doc") + globalSet := flag.NewFlagSet("test", 0) + globalSet.Int("myflag", 42, "doc") + command := cli.Command{Name: "mycommand"} + c := cli.NewContext(nil, set, globalSet) + c.Command = command + expect(t, c.Int("myflag"), 12) + expect(t, c.GlobalInt("myflag"), 42) + expect(t, c.Command.Name, "mycommand") +} + +func TestContext_Int(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("myflag", 12, "doc") + c := cli.NewContext(nil, set, set) + expect(t, c.Int("myflag"), 12) +} + +func TestContext_String(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.String("myflag", "hello world", "doc") + c := cli.NewContext(nil, set, set) + expect(t, c.String("myflag"), "hello world") +} + +func TestContext_Bool(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + c := cli.NewContext(nil, set, set) + expect(t, c.Bool("myflag"), false) +} + +func TestContext_BoolT(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", true, "doc") + c := cli.NewContext(nil, set, set) + expect(t, c.BoolT("myflag"), true) +} + +func TestContext_Args(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + c := cli.NewContext(nil, set, set) + set.Parse([]string{"--myflag", "bat", "baz"}) + expect(t, len(c.Args()), 2) + expect(t, c.Bool("myflag"), true) +} + +func TestContext_IsSet(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + set.String("otherflag", "hello world", "doc") + c := cli.NewContext(nil, set, set) + set.Parse([]string{"--myflag", "bat", "baz"}) + expect(t, c.IsSet("myflag"), true) + expect(t, c.IsSet("otherflag"), false) + expect(t, c.IsSet("bogusflag"), false) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag.go new file mode 100644 index 00000000000..e6f8838a9d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag.go @@ -0,0 +1,280 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// This flag enables bash-completion for all commands and subcommands +var BashCompletionFlag = BoolFlag{"generate-bash-completion", ""} + +// This flag prints the version for the application +var VersionFlag = BoolFlag{"version, v", "print the version"} + +// This flag prints the help for all commands and subcommands +var HelpFlag = BoolFlag{"help, h", "show help"} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recomended that +// this interface be implemented. 
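+// Because getName is unexported, new Flag implementations live inside this
+// package; a minimal, hypothetical sketch modeled on BoolTFlag below (not part
+// of the vendored source), reusing the prefixedNames and eachName helpers:
+//
+//	type onFlag struct{ Name, Usage string }
+//
+//	func (f onFlag) String() string  { return prefixedNames(f.Name) + "\t" + f.Usage }
+//	func (f onFlag) getName() string { return f.Name }
+//	func (f onFlag) Apply(set *flag.FlagSet) {
+//		eachName(f.Name, func(name string) { set.Bool(name, true, f.Usage) })
+//	}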
+type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + getName() string +} + +func flagSet(name string, flags []Flag) *flag.FlagSet { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + f.Apply(set) + } + return set +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// GenericFlag is the flag type for types implementing Generic +type GenericFlag struct { + Name string + Value Generic + Usage string +} + +func (f GenericFlag) String() string { + return fmt.Sprintf("%s%s %v\t`%v` %s", prefixFor(f.Name), f.Name, f.Value, "-"+f.Name+" option -"+f.Name+" option", f.Usage) +} + +func (f GenericFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) +} + +func (f GenericFlag) getName() string { + return f.Name +} + +type StringSlice []string + +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +func (f *StringSlice) Value() []string { + return *f +} + +type StringSliceFlag struct { + Name string + Value *StringSlice + Usage string +} + +func (f StringSliceFlag) String() string { + firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") + pref := prefixFor(firstName) + return fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage) +} + +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) +} + +func (f StringSliceFlag) getName() string { + return f.Name +} + +type IntSlice []int + +func (f *IntSlice) Set(value string) error { + + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } else { + *f = append(*f, tmp) + } + return nil +} + +func (f *IntSlice) String() string { + return fmt.Sprintf("%d", *f) +} + +func (f *IntSlice) Value() []int { + return *f +} + +type IntSliceFlag struct { + Name string + Value *IntSlice + Usage string +} + +func (f IntSliceFlag) String() string { + firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") + pref := prefixFor(firstName) + return fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage) +} + +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) +} + +func (f IntSliceFlag) getName() string { + return f.Name +} + +type BoolFlag struct { + Name string + Usage string +} + +func (f BoolFlag) String() string { + return fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage) +} + +func (f BoolFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Bool(name, false, f.Usage) + }) +} + +func (f BoolFlag) getName() string { + return f.Name +} + +type BoolTFlag struct { + Name string + Usage string +} + +func (f BoolTFlag) String() string { + return fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage) +} + +func (f BoolTFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Bool(name, true, f.Usage) + }) +} + +func (f BoolTFlag) getName() string { + return f.Name +} + +type StringFlag struct { + Name string + Value 
string + Usage string +} + +func (f StringFlag) String() string { + var fmtString string + fmtString = "%s %v\t%v" + + if len(f.Value) > 0 { + fmtString = "%s '%v'\t%v" + } else { + fmtString = "%s %v\t%v" + } + + return fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f StringFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.String(name, f.Value, f.Usage) + }) +} + +func (f StringFlag) getName() string { + return f.Name +} + +type IntFlag struct { + Name string + Value int + Usage string +} + +func (f IntFlag) String() string { + return fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f IntFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Int(name, f.Value, f.Usage) + }) +} + +func (f IntFlag) getName() string { + return f.Name +} + +type Float64Flag struct { + Name string + Value float64 + Usage string +} + +func (f Float64Flag) String() string { + return fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f Float64Flag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Float64(name, f.Value, f.Usage) + }) +} + +func (f Float64Flag) getName() string { + return f.Name +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +func prefixedNames(fullName string) (prefixed string) { + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if i < len(parts)-1 { + prefixed += ", " + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag_test.go new file mode 100644 index 00000000000..1c05f0144d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/flag_test.go @@ -0,0 +1,194 @@ +package cli_test + +import ( + "github.com/codegangsta/cli" + + "fmt" + "reflect" + "strings" + "testing" +) + +var boolFlagTests = []struct { + name string + expected string +}{ + {"help", "--help\t"}, + {"h", "-h\t"}, +} + +func TestBoolFlagHelpOutput(t *testing.T) { + + for _, test := range boolFlagTests { + flag := cli.BoolFlag{Name: test.name} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +var stringFlagTests = []struct { + name string + value string + expected string +}{ + {"help", "", "--help \t"}, + {"h", "", "-h \t"}, + {"h", "", "-h \t"}, + {"test", "Something", "--test 'Something'\t"}, +} + +func TestStringFlagHelpOutput(t *testing.T) { + + for _, test := range stringFlagTests { + flag := cli.StringFlag{Name: test.name, Value: test.value} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +var intFlagTests = []struct { + name string + expected string +}{ + {"help", "--help '0'\t"}, + {"h", "-h '0'\t"}, +} + +func TestIntFlagHelpOutput(t *testing.T) { + + for _, test := range intFlagTests { + flag := cli.IntFlag{Name: test.name} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +var float64FlagTests = []struct { + name string + expected string +}{ + {"help", "--help '0'\t"}, + {"h", "-h '0'\t"}, +} + +func 
TestFloat64FlagHelpOutput(t *testing.T) { + + for _, test := range float64FlagTests { + flag := cli.Float64Flag{Name: test.name} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +func TestParseMultiString(t *testing.T) { + (&cli.App{ + Flags: []cli.Flag{ + cli.StringFlag{Name: "serve, s"}, + }, + Action: func(ctx *cli.Context) { + if ctx.String("serve") != "10" { + t.Errorf("main name not set") + } + if ctx.String("s") != "10" { + t.Errorf("short name not set") + } + }, + }).Run([]string{"run", "-s", "10"}) +} + +func TestParseMultiStringSlice(t *testing.T) { + (&cli.App{ + Flags: []cli.Flag{ + cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, + }, + Action: func(ctx *cli.Context) { + if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { + t.Errorf("main name not set") + } + if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { + t.Errorf("short name not set") + } + }, + }).Run([]string{"run", "-s", "10", "-s", "20"}) +} + +func TestParseMultiInt(t *testing.T) { + a := cli.App{ + Flags: []cli.Flag{ + cli.IntFlag{Name: "serve, s"}, + }, + Action: func(ctx *cli.Context) { + if ctx.Int("serve") != 10 { + t.Errorf("main name not set") + } + if ctx.Int("s") != 10 { + t.Errorf("short name not set") + } + }, + } + a.Run([]string{"run", "-s", "10"}) +} + +func TestParseMultiBool(t *testing.T) { + a := cli.App{ + Flags: []cli.Flag{ + cli.BoolFlag{Name: "serve, s"}, + }, + Action: func(ctx *cli.Context) { + if ctx.Bool("serve") != true { + t.Errorf("main name not set") + } + if ctx.Bool("s") != true { + t.Errorf("short name not set") + } + }, + } + a.Run([]string{"run", "--serve"}) +} + +type Parser [2]string + +func (p *Parser) Set(value string) error { + parts := strings.Split(value, ",") + if len(parts) != 2 { + return fmt.Errorf("invalid format") + } + + (*p)[0] = parts[0] + (*p)[1] = parts[1] + + return nil +} + +func (p *Parser) String() string { + return fmt.Sprintf("%s,%s", p[0], p[1]) +} + +func TestParseGeneric(t *testing.T) { + a := cli.App{ + Flags: []cli.Flag{ + cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, + }, + Action: func(ctx *cli.Context) { + if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { + t.Errorf("main name not set") + } + if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { + t.Errorf("short name not set") + } + }, + } + a.Run([]string{"run", "-s", "10,20"}) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/help.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/help.go new file mode 100644 index 00000000000..ccca036276b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/help.go @@ -0,0 +1,213 @@ +package cli + +import ( + "fmt" + "os" + "text/tabwriter" + "text/template" +) + +// The text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + {{.Name}} {{ if .Flags }}[global options] {{ end }}command{{ if .Flags }} [command options]{{ end }} [arguments...] 
+ +VERSION: + {{.Version}} + +COMMANDS: + {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{ if .Flags }} +GLOBAL OPTIONS: + {{range .Flags}}{{.}} + {{end}}{{ end }} +` + +// The text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + command {{.Name}}{{ if .Flags }} [command options]{{ end }} [arguments...] + +DESCRIPTION: + {{.Description}}{{ if .Flags }} + +OPTIONS: + {{range .Flags}}{{.}} + {{end}}{{ end }} +` + +// The text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + {{.Name}} command{{ if .Flags }} [command options]{{ end }} [arguments...] + +COMMANDS: + {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{ if .Flags }} +OPTIONS: + {{range .Flags}}{{.}} + {{end}}{{ end }} +` + +var helpCommand = Command{ + Name: "help", + ShortName: "h", + Usage: "Shows a list of commands or help for one command", + Action: func(c *Context) { + args := c.Args() + if args.Present() { + ShowCommandHelp(c, args.First()) + } else { + ShowAppHelp(c) + } + }, +} + +var helpSubcommand = Command{ + Name: "help", + ShortName: "h", + Usage: "Shows a list of commands or help for one command", + Action: func(c *Context) { + args := c.Args() + if args.Present() { + ShowCommandHelp(c, args.First()) + } else { + ShowSubcommandHelp(c) + } + }, +} + +// Prints help for the App +var HelpPrinter = printHelp + +func ShowAppHelp(c *Context) { + HelpPrinter(AppHelpTemplate, c.App) +} + +// Prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + for _, command := range c.App.Commands { + fmt.Println(command.Name) + if command.ShortName != "" { + fmt.Println(command.ShortName) + } + } +} + +// Prints help for the given command +func ShowCommandHelp(c *Context, command string) { + for _, c := range c.App.Commands { + if c.HasName(command) { + HelpPrinter(CommandHelpTemplate, c) + return + } + } + + if c.App.CommandNotFound != nil { + c.App.CommandNotFound(c, command) + } else { + fmt.Printf("No help topic for '%v'\n", command) + } +} + +// Prints help for the given subcommand +func ShowSubcommandHelp(c *Context) { + HelpPrinter(SubcommandHelpTemplate, c.App) +} + +// Prints the version number of the App +func ShowVersion(c *Context) { + fmt.Printf("%v version %v\n", c.App.Name, c.App.Version) +} + +// Prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// Prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil && c.BashComplete != nil { + c.BashComplete(ctx) + } +} + +func printHelp(templ string, data interface{}) { + w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) + t := template.Must(template.New("help").Parse(templ)) + err := t.Execute(w, data) + if err != nil { + panic(err) + } + w.Flush() +} + +func checkVersion(c *Context) bool { + if c.GlobalBool("version") { + ShowVersion(c) + return true + } + + return false +} + +func checkHelp(c *Context) bool { + if c.GlobalBool("h") || c.GlobalBool("help") { + ShowAppHelp(c) + 
return true + } + + return false +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.GlobalBool("h") || c.GlobalBool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkCompletions(c *Context) bool { + if c.GlobalBool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { + ShowCompletions(c) + return true + } + + return false +} + +func checkCommandCompletions(c *Context, name string) bool { + if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { + ShowCommandCompletions(c, name) + return true + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/helpers_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/helpers_test.go new file mode 100644 index 00000000000..cdc4feb2fcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/codegangsta/cli/helpers_test.go @@ -0,0 +1,19 @@ +package cli_test + +import ( + "reflect" + "testing" +) + +/* Test Helpers */ +func expect(t *testing.T, a interface{}, b interface{}) { + if a != b { + t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func refute(t *testing.T, a interface{}, b interface{}) { + if a == b { + t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/.travis.yml b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/.travis.yml new file mode 100644 index 00000000000..8c9f56e44a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: 1.2 + +install: + - echo "Skip install" + +script: + - ./test diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/LICENSE b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. 
Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/README.md new file mode 100644 index 00000000000..0ee09fec0ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/README.md @@ -0,0 +1,44 @@ +# go-systemd + +Go bindings to systemd. 
The project has three packages: + +- activation - for writing and using socket activation from Go +- journal - for writing to systemd's logging service, journal +- dbus - for starting/stopping/inspecting running services and units + +Go docs for the entire project are here: + +http://godoc.org/github.com/coreos/go-systemd + +## Socket Activation + +An example HTTP server using socket activation can be quickly setup by +following this README on a Linux machine running systemd: + +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver + +## Journal + +Using this package you can submit journal entries directly to systemd's journal taking advantage of features like indexed key/value pairs for each log entry. + +## D-Bus + +The D-Bus API lets you start, stop and introspect systemd units. The API docs are here: + +http://godoc.org/github.com/coreos/go-systemd/dbus + +### Debugging + +Create `/etc/dbus-1/system-local.conf` that looks like this: + +``` + + + + + + + +``` diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files.go new file mode 100644 index 00000000000..74b4fc10f3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files.go @@ -0,0 +1,56 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "syscall" +) + +// based on: https://gist.github.com/alberts/4640792 +const ( + listenFdsStart = 3 +) + +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + // there is no way to unset env in golang os package for now + // https://code.google.com/p/go/issues/detail?id=6423 + defer os.Setenv("LISTEN_PID", "") + defer os.Setenv("LISTEN_FDS", "") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + var files []*os.File + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) + } + + return files +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files_test.go new file mode 100644 index 00000000000..a1c6948fb2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/files_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activation + +import ( + "bytes" + "io" + "os" + "os/exec" + "testing" +) + +// correctStringWritten fails the text if the correct string wasn't written +// to the other side of the pipe. +func correctStringWritten(t *testing.T, r *os.File, expected string) bool { + bytes := make([]byte, len(expected)) + io.ReadAtLeast(r, bytes, len(expected)) + + if string(bytes) != expected { + t.Fatalf("Unexpected string %s", string(bytes)) + } + + return true +} + +// TestActivation forks out a copy of activation.go example and reads back two +// strings from the pipes that are passed in. +func TestActivation(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + + r1, w1, _ := os.Pipe() + r2, w2, _ := os.Pipe() + cmd.ExtraFiles = []*os.File{ + w1, + w2, + } + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1") + + err := cmd.Run() + if err != nil { + t.Fatalf(err.Error()) + } + + correctStringWritten(t, r1, "Hello world") + correctStringWritten(t, r2, "Goodbye world") +} + +func TestActivationNoFix(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2") + + out, _ := cmd.CombinedOutput() + if bytes.Contains(out, []byte("No files")) == false { + t.Fatalf("Child didn't error out as expected") + } +} + +func TestActivationNoFiles(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1") + + out, _ := cmd.CombinedOutput() + if bytes.Contains(out, []byte("No files")) == false { + t.Fatalf("Child didn't error out as expected") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners.go new file mode 100644 index 00000000000..cdb2cf4bb4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners.go @@ -0,0 +1,38 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activation + +import ( + "fmt" + "net" +) + +// Listeners returns net.Listeners for all socket activated fds passed to this process. 
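+//
+// For illustration only (not part of the upstream docs): a minimal sketch of
+// serving HTTP over a single socket-activated listener, assuming the caller
+// imports log and net/http and that exactly one socket unit was passed in:
+//
+//	listeners, err := activation.Listeners(true)
+//	if err != nil || len(listeners) != 1 {
+//		log.Fatalf("expected one activated listener, got %d (err: %v)", len(listeners), err)
+//	}
+//	http.Serve(listeners[0], nil)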
+func Listeners(unsetEnv bool) ([]net.Listener, error) { + files := Files(unsetEnv) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + var err error + listeners[i], err = net.FileListener(f) + if err != nil { + return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) + } + } + + return listeners, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go new file mode 100644 index 00000000000..c3627d6d4d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activation + +import ( + "io" + "net" + "os" + "os/exec" + "testing" +) + +// correctStringWritten fails the text if the correct string wasn't written +// to the other side of the pipe. +func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool { + bytes := make([]byte, len(expected)) + io.ReadAtLeast(r, bytes, len(expected)) + + if string(bytes) != expected { + t.Fatalf("Unexpected string %s", string(bytes)) + } + + return true +} + +// TestActivation forks out a copy of activation.go example and reads back two +// strings from the pipes that are passed in. +func TestListeners(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/listen.go") + + l1, err := net.Listen("tcp", ":9999") + if err != nil { + t.Fatalf(err.Error()) + } + l2, err := net.Listen("tcp", ":1234") + if err != nil { + t.Fatalf(err.Error()) + } + + t1 := l1.(*net.TCPListener) + t2 := l2.(*net.TCPListener) + + f1, _ := t1.File() + f2, _ := t2.File() + + cmd.ExtraFiles = []*os.File{ + f1, + f2, + } + + r1, err := net.Dial("tcp", "127.0.0.1:9999") + if err != nil { + t.Fatalf(err.Error()) + } + r1.Write([]byte("Hi")) + + r2, err := net.Dial("tcp", "127.0.0.1:1234") + if err != nil { + t.Fatalf(err.Error()) + } + r2.Write([]byte("Hi")) + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1") + + out, err := cmd.Output() + if err != nil { + println(string(out)) + t.Fatalf(err.Error()) + } + + correctStringWrittenNet(t, r1, "Hello world") + correctStringWrittenNet(t, r2, "Goodbye world") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 00000000000..91d71121451 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,104 @@ +/* +Copyright 2013 CoreOS Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const signalBuffer = 100 + +// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for +// serializing special characters. +func ObjectPath(path string) dbus.ObjectPath { + path = strings.Replace(path, ".", "_2e", -1) + path = strings.Replace(path, "-", "_2d", -1) + path = strings.Replace(path, "@", "_40", -1) + + return dbus.ObjectPath(path) +} + +// Conn is a connection to systemds dbus endpoint. +type Conn struct { + sysconn *dbus.Conn + sysobj *dbus.Object + jobListener struct { + jobs map[dbus.ObjectPath]chan string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + dispatch map[string]func(dbus.Signal) +} + +// New() establishes a connection to the system bus and authenticates. +func New() (*Conn, error) { + c := new(Conn) + + if err := c.initConnection(); err != nil { + return nil, err + } + + c.initJobs() + return c, nil +} + +func (c *Conn) initConnection() error { + var err error + c.sysconn, err = dbus.SystemBusPrivate() + if err != nil { + return err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = c.sysconn.Auth(methods) + if err != nil { + c.sysconn.Close() + return err + } + + err = c.sysconn.Hello() + if err != nil { + c.sysconn.Close() + return err + } + + c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) + + // Setup the listeners on jobs so that we can get completions + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + c.initSubscription() + c.initDispatch() + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go new file mode 100644 index 00000000000..2e80f73ef74 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "testing" +) + +// TestObjectPath ensures path encoding of the systemd rules works. +func TestObjectPath(t *testing.T) { + input := "/silly-path/to@a/unit..service" + output := ObjectPath(input) + expected := "/silly_2dpath/to_40a/unit_2e_2eservice" + + if string(output) != expected { + t.Fatalf("Output '%s' did not match expected '%s'", output, expected) + } +} + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 00000000000..a60de059e66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,396 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "errors" + "github.com/godbus/dbus" +) + +func (c *Conn) initJobs() { + c.jobListener.jobs = make(map[dbus.ObjectPath]chan string) +} + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) { + c.jobListener.Lock() + defer c.jobListener.Unlock() + + ch := make(chan string, 1) + var path dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&path) + if err != nil { + return nil, err + } + c.jobListener.jobs[path] = ch + return ch, nil +} + +func (c *Conn) runJob(job string, args ...interface{}) (string, error) { + respCh, err := c.startJob(job, args...) + if err != nil { + return "", err + } + return <-respCh, nil +} + +// StartUnit enqeues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. 
It is not recommended to make use of the latter +// two options. +// +// Result string: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +func (c *Conn) StartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. +func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. 
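+//
+// A hedged usage sketch (the unit name is a placeholder, syscall.SIGTERM is
+// only an example signal, and error handling for New is elided for brevity):
+//
+//	conn, err := New()
+//	if err == nil {
+//		conn.KillUnit("my-app.service", int32(syscall.SIGTERM))
+//	}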
+func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the unit name and returns all of its dbus object properties. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName) +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. 
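+//
+// A minimal sketch, not part of the upstream documentation (assumes an
+// established *Conn named conn and an fmt import):
+//
+//	units, err := conn.ListUnits()
+//	if err == nil {
+//		for _, u := range units {
+//			fmt.Printf("%s: %s/%s\n", u.Name, u.ActiveState, u.SubState)
+//		}
+//	}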
+func (c *Conn) ListUnits() ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. 
The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. 
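+//
+// A hedged sketch (conn is an assumed, already-established *Conn; logging the
+// failure is an illustrative choice, not upstream behavior):
+//
+//	if err := conn.Reload(); err != nil {
+//		log.Printf("daemon-reload failed: %v", err)
+//	}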
+func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go new file mode 100644 index 00000000000..8c7ab93eb36 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -0,0 +1,332 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/godbus/dbus" +) + +func setupConn(t *testing.T) *Conn { + conn, err := New() + if err != nil { + t.Fatal(err) + } + + return conn +} + +func findFixture(target string, t *testing.T) string { + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + return abs +} + +func setupUnit(target string, conn *Conn, t *testing.T) { + // Blindly stop the unit in case it is running + conn.StopUnit(target, "replace") + + // Blindly remove the symlink in case it exists + targetRun := filepath.Join("/run/systemd/system/", target) + os.Remove(targetRun) +} + +func linkUnit(target string, conn *Conn, t *testing.T) { + abs := findFixture(target, t) + fixture := []string{abs} + + changes, err := conn.LinkUnitFiles(fixture, true, true) + if err != nil { + t.Fatal(err) + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + runPath := filepath.Join("/run/systemd/system/", target) + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } +} + +// Ensure that basic unit starting and stopping works. +func TestStartStopUnit(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + // 2. Start the unit + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done:", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +// Enables a unit and then immediately tears it down +func TestEnableDisableUnit(t *testing.T) { + target := "enable-disable.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + abs := findFixture(target, t) + runPath := filepath.Join("/run/systemd/system/", target) + + // 1. 
Enable the unit + install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true) + if err != nil { + t.Fatal(err) + } + + if install != false { + t.Fatal("Install was true") + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + if changes[0].Filename != runPath { + t.Fatal("Unexpected target filename") + } + + // 2. Disable the unit + dChanges, err := conn.DisableUnitFiles([]string{abs}, true) + if err != nil { + t.Fatal(err) + } + + if len(dChanges) != 1 { + t.Fatalf("Changes should include the path, %v", dChanges) + } + if dChanges[0].Filename != runPath { + t.Fatalf("Change should include correct filename, %+v", dChanges[0]) + } + if dChanges[0].Destination != "" { + t.Fatalf("Change destination should be empty, %+v", dChanges[0]) + } +} + +// TestGetUnitProperties reads the `-.mount` which should exist on all systemd +// systems and ensures that one of its properties is valid. +func TestGetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "-.mount" + + info, err := conn.GetUnitProperties(unit) + if err != nil { + t.Fatal(err) + } + + names := info["Wants"].([]string) + + if len(names) < 1 { + t.Fatal("/ is unwanted") + } + + if names[0] != "system.slice" { + t.Fatal("unexpected wants for /") + } + + prop, err := conn.GetUnitProperty(unit, "Wants") + if err != nil { + t.Fatal(err) + } + + if prop.Name != "Wants" { + t.Fatal("unexpected property name") + } + + val := prop.Value.Value().([]string) + if !reflect.DeepEqual(val, names) { + t.Fatal("unexpected property value") + } +} + +// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a +// unit with an invalid name. This test should be run with --test.timeout set, +// as a fail will manifest as GetUnitProperties hanging indefinitely. +func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) { + conn := setupConn(t) + + unit := "//invalid#$^/" + + _, err := conn.GetUnitProperties(unit) + if err == nil { + t.Fatal("Expected an error, got nil") + } + + _, err = conn.GetUnitProperty(unit, "Wants") + if err == nil { + t.Fatal("Expected an error, got nil") + } +} + +// TestSetUnitProperties changes a cgroup setting on the `tmp.mount` +// which should exist on all systemd systems and ensures that the +// property was set. +func TestSetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "tmp.mount" + + if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil { + t.Fatal(err) + } + + info, err := conn.GetUnitTypeProperties(unit, "Mount") + if err != nil { + t.Fatal(err) + } + + value := info["CPUShares"].(uint64) + if value != 1023 { + t.Fatal("CPUShares of unit is not 1023:", value) + } +} + +// Ensure that basic transient unit starting and stopping works. +func TestStartStopTransientUnit(t *testing.T) { + conn := setupConn(t) + + props := []Property{ + PropExecStart([]string{"/bin/sleep", "400"}, false), + } + target := fmt.Sprintf("testing-transient-%d.service", rand.Int()) + + // Start the unit + job, err := conn.StartTransientUnit(target, "replace", props...) + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done:", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. 
Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +func TestConnJobListener(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + jobSize := len(conn.jobListener.jobs) + + _, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + _, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + currentJobSize := len(conn.jobListener.jobs) + if jobSize != currentJobSize { + t.Fatal("JobListener jobs leaked") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/properties.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 00000000000..a06ccda761d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,220 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. 
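+//
+// An illustrative sketch of pairing PropExecStart with StartTransientUnit (the
+// unit name and sleep command are placeholders, and conn is an assumed,
+// already-established *Conn):
+//
+//	props := []Property{PropExecStart([]string{"/bin/sleep", "60"}, false)}
+//	result, err := conn.StartTransientUnit("example-transient.service", "replace", props...)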
See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. 
See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 00000000000..45ad1fb3991 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,33 @@ +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() (int) { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() (*set) { + return &set{make(map[string] bool)} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go new file mode 100644 index 00000000000..c4435f8800c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go @@ -0,0 +1,39 @@ +package dbus + +import ( + "testing" +) + +// TestBasicSetActions asserts that Add & Remove behavior is correct +func TestBasicSetActions(t *testing.T) { + s := newSet() + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } + + s.Add("foo") + + if !s.Contains("foo") { + t.Fatal("set should contain 'foo'") + } + + v := s.Values() + if len(v) != 1 { + t.Fatal("set.Values did not report correct number of values") + } + if v[0] != "foo" { + t.Fatal("set.Values did not report value") + } + + s.Remove("foo") + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } + + v = s.Values() + if len(v) != 0 { + t.Fatal("set.Values did not report correct number of values") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 00000000000..fcd29b6e8fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,251 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) initSubscription() { + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) +} + +func (c *Conn) initDispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sysconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + c.jobComplete(signal) + + unitName := signal.Body[2].(string) + var unitPath dbus.ObjectPath + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + if unitPath != dbus.ObjectPath("") { + c.sendSubStateUpdate(unitPath) + } + case "org.freedesktop.systemd1.Manager.UnitNew": + c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath)) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + // we only care about SubState updates, which are a Unit property + c.sendSubStateUpdate(signal.Path) + } + } + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. 
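+//
+// A hedged sketch (conn is an assumed *Conn on which Subscribe has already
+// been called; the filter keeps only .service units, since returning true
+// drops a unit; strings and time imports are assumed):
+//
+//	statusCh, errCh := conn.SubscribeUnitsCustom(
+//		5*time.Second, 0,
+//		func(a, b *UnitStatus) bool { return a.ActiveState != b.ActiveState },
+//		func(name string) bool { return !strings.HasSuffix(name, ".service") },
+//	)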
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + c.subscriber.updateCh = updateCh + c.subscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + if c.subscriber.updateCh == nil { + return + } + + if c.shouldIgnore(path) { + return + } + + info, err := c.GetUnitProperties(string(path)) + if err != nil { + select { + case c.subscriber.errCh <- err: + default: + } + } + + name := info["Id"].(string) + substate := info["SubState"].(string) + + update := &SubStateUpdate{name, substate} + select { + case c.subscriber.updateCh <- update: + default: + select { + case c.subscriber.errCh <- errors.New("update channel full!"): + default: + } + } + + c.updateIgnore(path, info) +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. 
a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). + +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + c.cleanIgnore() + + // unit is unloaded - it will trigger bad systemd dbus behavior + if info["LoadState"].(string) == "not-found" { + c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subscriber.cleanIgnore < now { + c.subscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subscriber.ignore { + if t < now { + delete(c.subscriber.ignore, p) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 00000000000..26257860522 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,32 @@ +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() (*SubscriptionSet) { + return &SubscriptionSet{newSet(), conn} +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go new file mode 100644 index 00000000000..4ecd15376d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go @@ -0,0 +1,66 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. 
+func TestSubscriptionSetUnit(t *testing.T) { + target := "subscribe-events-set.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + subSet := conn.NewSubscriptionSet() + evChan, errChan := subSet.Subscribe() + + subSet.Add(target) + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + if !ok { + t.Fatal("Unexpected event:", changes) + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go new file mode 100644 index 00000000000..f2b5dfc28c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go @@ -0,0 +1,91 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribe exercises the basics of subscription +func TestSubscribe(t *testing.T) { + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } +} + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. +func TestSubscribeUnit(t *testing.T) { + target := "subscribe-events.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } + + evChan, errChan := conn.SubscribeUnits(time.Second) + + setupUnit(target, conn, t) + linkUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + // Just continue until we see our event. + if !ok { + continue + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} + + diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go new file mode 100644 index 00000000000..b3cf70ed84b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go @@ -0,0 +1,44 @@ +// Activation example used by the activation unit tests. 
+package main + +import ( + "fmt" + "os" + + "github.com/coreos/go-systemd/activation" +) + +func fixListenPid() { + if os.Getenv("FIX_LISTEN_PID") != "" { + // HACK: real systemd would set LISTEN_PID before exec'ing but + // this is too difficult in golang for the purpose of a test. + // Do not do this in real code. + os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid())) + } +} + +func main() { + fixListenPid() + + files := activation.Files(false) + + if len(files) == 0 { + panic("No files") + } + + if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" { + panic("Should not unset envs") + } + + files = activation.Files(true) + + if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" { + panic("Can not unset envs") + } + + // Write out the expected strings to the two pipes + files[0].Write([]byte("Hello world")) + files[1].Write([]byte("Goodbye world")) + + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md new file mode 100644 index 00000000000..a350cca5e56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md @@ -0,0 +1,19 @@ +## socket activated http server + +This is a simple example of using socket activation with systemd to serve a +simple HTTP server on http://127.0.0.1:8076 + +To try it out `go get` the httpserver and run it under the systemd-activate helper + +``` +export GOPATH=`pwd` +go get github.com/coreos/go-systemd/examples/activation/httpserver +sudo /usr/lib/systemd/systemd-activate -l 127.0.0.1:8076 ./bin/httpserver +``` + +Then curl the URL and you will notice that it starts up: + +``` +curl 127.0.0.1:8076 +hello socket activated world! 
+``` diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service new file mode 100644 index 00000000000..c8dea0f6b3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service @@ -0,0 +1,11 @@ +[Unit] +Description=Hello World HTTP +Requires=network.target +After=multi-user.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/httpserver + +[Install] +WantedBy=multi-user.target diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket new file mode 100644 index 00000000000..723ed7ed92d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket @@ -0,0 +1,5 @@ +[Socket] +ListenStream=127.0.0.1:8076 + +[Install] +WantedBy=sockets.target diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go new file mode 100644 index 00000000000..380c325d61b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go @@ -0,0 +1,26 @@ +package main + +import ( + "io" + "net/http" + + "github.com/coreos/go-systemd/activation" +) + +func HelloServer(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "hello socket activated world!\n") +} + +func main() { + listeners, err := activation.Listeners(true) + if err != nil { + panic(err) + } + + if len(listeners) != 1 { + panic("Unexpected number of socket activation fds") + } + + http.HandleFunc("/", HelloServer) + http.Serve(listeners[0], nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go new file mode 100644 index 00000000000..5850a8b796f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go @@ -0,0 +1,50 @@ +// Activation example used by the activation unit tests. +package main + +import ( + "fmt" + "os" + + "github.com/coreos/go-systemd/activation" +) + +func fixListenPid() { + if os.Getenv("FIX_LISTEN_PID") != "" { + // HACK: real systemd would set LISTEN_PID before exec'ing but + // this is too difficult in golang for the purpose of a test. + // Do not do this in real code. 
+ os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid())) + } +} + +func main() { + fixListenPid() + + listeners, _ := activation.Listeners(false) + + if len(listeners) == 0 { + panic("No listeners") + } + + if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" { + panic("Should not unset envs") + } + + listeners, err := activation.Listeners(true) + if err != nil { + panic(err) + } + + if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" { + panic("Can not unset envs") + } + + c0, _ := listeners[0].Accept() + c1, _ := listeners[1].Accept() + + // Write out the expected strings to the two pipes + c0.Write([]byte("Hello world")) + c1.Write([]byte("Goodbye world")) + + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service new file mode 100644 index 00000000000..74c9459088c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service @@ -0,0 +1,5 @@ +[Unit] +Description=enable disable test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service new file mode 100644 index 00000000000..a1f8c367732 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service new file mode 100644 index 00000000000..a1f8c367732 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service new file mode 100644 index 00000000000..a1f8c367732 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/journal/send.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/journal/send.go new file mode 100644 index 00000000000..b52e120988c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/journal/send.go @@ -0,0 +1,168 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package journal provides write bindings to the systemd journal +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true iff the systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the systemd journal. vars is a map of journald fields to +// values. Fields must be composed of uppercase letters, numbers, and +// underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. 
(from the docs) + */ + + valid := name[0] != '_' + for _, c := range name { + valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' + } + return valid +} + +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok { + return false + } + + sysErr, ok := opErr.Err.(syscall.Errno) + if !ok { + return false + } + + return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS +} + +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +func journalError(s string) error { + s = "journal error: " + s + fmt.Fprintln(os.Stderr, s) + return errors.New(s) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus.go new file mode 100644 index 00000000000..d00dd110b51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Integration with the systemd logind API. See http://www.freedesktop.org/wiki/Software/systemd/logind/ +package login1 + +import ( + "os" + "strconv" + + "github.com/godbus/dbus" +) + +const ( + dbusInterface = "org.freedesktop.login1.Manager" + dbusPath = "/org/freedesktop/login1" +) + +// Conn is a connection to systemds dbus endpoint. +type Conn struct { + conn *dbus.Conn + object *dbus.Object +} + +// New() establishes a connection to the system bus and authenticates. +func New() (*Conn, error) { + c := new(Conn) + + if err := c.initConnection(); err != nil { + return nil, err + } + + return c, nil +} + +func (c *Conn) initConnection() error { + var err error + c.conn, err = dbus.SystemBusPrivate() + if err != nil { + return err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = c.conn.Auth(methods) + if err != nil { + c.conn.Close() + return err + } + + err = c.conn.Hello() + if err != nil { + c.conn.Close() + return err + } + + c.object = c.conn.Object("org.freedesktop.login1", dbus.ObjectPath(dbusPath)) + + return nil +} + +// Reboot asks logind for a reboot optionally asking for auth. 
+func (c *Conn) Reboot(askForAuth bool) { + c.object.Call(dbusInterface+".Reboot", 0, askForAuth) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go new file mode 100644 index 00000000000..4439d37380c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/login1/dbus_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package login1 + +import ( + "testing" +) + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/test b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/test new file mode 100644 index 00000000000..6e043658aee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/coreos/go-systemd/test @@ -0,0 +1,3 @@ +#!/bin/sh -e + +go test -v ./... diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/LICENSE new file mode 100644 index 00000000000..06b252bcbc5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke () +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/README.markdown b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/README.markdown new file mode 100644 index 00000000000..3ab2116651a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/README.markdown @@ -0,0 +1,38 @@ +dbus +---- + +dbus is a simple library that implements native Go client bindings for the +D-Bus message bus system. + +### Features + +* Complete native implementation of the D-Bus message protocol +* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections) +* Subpackages that help with the introspection / property interfaces + +### Installation + +This packages requires Go 1.1. If you installed it and set up your GOPATH, just run: + +``` +go get github.com/godbus/dbus +``` + +If you want to use the subpackages, you can install them the same way. + +### Usage + +The complete package documentation and some simple examples are available at +[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the +[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory +gives a short overview over the basic usage. + +Please note that the API is considered unstable for now and may change without +further notice. + +### License + +go.dbus is available under the Simplified BSD License; see LICENSE for the full +text. + +Nearly all of the credit for this library goes to github.com/guelfey/go.dbus. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth.go new file mode 100644 index 00000000000..98017b693ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth.go @@ -0,0 +1,253 @@ +package dbus + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// AuthStatus represents the Status of an authentication mechanism. +type AuthStatus byte + +const ( + // AuthOk signals that authentication is finished; the next command + // from the server should be an OK. + AuthOk AuthStatus = iota + + // AuthContinue signals that additional data is needed; the next command + // from the server should be a DATA. + AuthContinue + + // AuthError signals an error; the server sent invalid data or some + // other unexpected thing happened and the current authentication + // process should be aborted. + AuthError +) + +type authState byte + +const ( + waitingForData authState = iota + waitingForOk + waitingForReject +) + +// Auth defines the behaviour of an authentication mechanism. +type Auth interface { + // Return the name of the mechnism, the argument to the first AUTH command + // and the next status. + FirstData() (name, resp []byte, status AuthStatus) + + // Process the given DATA command, and return the argument to the DATA + // command and the next status. If len(resp) == 0, no DATA command is sent. + HandleData(data []byte) (resp []byte, status AuthStatus) +} + +// Auth authenticates the connection, trying the given list of authentication +// mechanisms (in that order). If nil is passed, the EXTERNAL and +// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private +// connections, this method must be called before sending any messages to the +// bus. Auth must not be called on shared connections. 
+func (conn *Conn) Auth(methods []Auth) error { + if methods == nil { + uid := strconv.Itoa(os.Getuid()) + methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + } + in := bufio.NewReader(conn.transport) + err := conn.transport.SendNullByte() + if err != nil { + return err + } + err = authWriteLine(conn.transport, []byte("AUTH")) + if err != nil { + return err + } + s, err := authReadLine(in) + if err != nil { + return err + } + if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { + return errors.New("dbus: authentication protocol error") + } + s = s[1:] + for _, v := range s { + for _, m := range methods { + if name, data, status := m.FirstData(); bytes.Equal(v, name) { + var ok bool + err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) + if err != nil { + return err + } + switch status { + case AuthOk: + err, ok = conn.tryAuth(m, waitingForOk, in) + case AuthContinue: + err, ok = conn.tryAuth(m, waitingForData, in) + default: + panic("dbus: invalid authentication status") + } + if err != nil { + return err + } + if ok { + if conn.transport.SupportsUnixFDs() { + err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) + if err != nil { + return err + } + line, err := authReadLine(in) + if err != nil { + return err + } + switch { + case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): + conn.EnableUnixFDs() + conn.unixFD = true + case bytes.Equal(line[0], []byte("ERROR")): + default: + return errors.New("dbus: authentication protocol error") + } + } + err = authWriteLine(conn.transport, []byte("BEGIN")) + if err != nil { + return err + } + go conn.inWorker() + go conn.outWorker() + return nil + } + } + } + } + return errors.New("dbus: authentication failed") +} + +// tryAuth tries to authenticate with m as the mechanism, using state as the +// initial authState and in for reading input. It returns (nil, true) on +// success, (nil, false) on a REJECTED and (someErr, false) if some other +// error occured. 
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { + for { + s, err := authReadLine(in) + if err != nil { + return err, false + } + switch { + case state == waitingForData && string(s[0]) == "DATA": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + continue + } + data, status := m.HandleData(s[1]) + switch status { + case AuthOk, AuthContinue: + if len(data) != 0 { + err = authWriteLine(conn.transport, []byte("DATA"), data) + if err != nil { + return err, false + } + } + if status == AuthOk { + state = waitingForOk + } + case AuthError: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + } + case state == waitingForData && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForData && string(s[0]) == "ERROR": + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForData && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForData: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForOk && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForOk && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForOk && (string(s[0]) == "DATA" || + string(s[0]) == "ERROR"): + + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForOk: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForReject && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForReject: + return errors.New("dbus: authentication protocol error"), false + default: + panic("dbus: invalid auth state") + } + } +} + +// authReadLine reads a line and separates it into its fields. +func authReadLine(in *bufio.Reader) ([][]byte, error) { + data, err := in.ReadBytes('\n') + if err != nil { + return nil, err + } + data = bytes.TrimSuffix(data, []byte("\r\n")) + return bytes.Split(data, []byte{' '}), nil +} + +// authWriteLine writes the given line in the authentication protocol format +// (elements of data separated by a " " and terminated by "\r\n"). +func authWriteLine(out io.Writer, data ...[]byte) error { + buf := make([]byte, 0) + for i, v := range data { + buf = append(buf, v...) 
+ if i != len(data)-1 { + buf = append(buf, ' ') + } + } + buf = append(buf, '\r') + buf = append(buf, '\n') + n, err := out.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_external.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_external.go new file mode 100644 index 00000000000..7e376d3ef6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_external.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "encoding/hex" +) + +// AuthExternal returns an Auth that authenticates as the given user with the +// EXTERNAL mechanism. +func AuthExternal(user string) Auth { + return authExternal{user} +} + +// AuthExternal implements the EXTERNAL authentication mechanism. +type authExternal struct { + user string +} + +func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("EXTERNAL"), b, AuthOk +} + +func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { + return nil, AuthError +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_sha1.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_sha1.go new file mode 100644 index 00000000000..df15b461198 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/auth_sha1.go @@ -0,0 +1,102 @@ +package dbus + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "os" +) + +// AuthCookieSha1 returns an Auth that authenticates as the given user with the +// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home +// directory of the user. +func AuthCookieSha1(user, home string) Auth { + return authCookieSha1{user, home} +} + +type authCookieSha1 struct { + user, home string +} + +func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue +} + +func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { + challenge := make([]byte, len(data)/2) + _, err := hex.Decode(challenge, data) + if err != nil { + return nil, AuthError + } + b := bytes.Split(challenge, []byte{' '}) + if len(b) != 3 { + return nil, AuthError + } + context := b[0] + id := b[1] + svchallenge := b[2] + cookie := a.getCookie(context, id) + if cookie == nil { + return nil, AuthError + } + clchallenge := a.generateChallenge() + if clchallenge == nil { + return nil, AuthError + } + hash := sha1.New() + hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) + hexhash := make([]byte, 2*hash.Size()) + hex.Encode(hexhash, hash.Sum(nil)) + data = append(clchallenge, ' ') + data = append(data, hexhash...) + resp := make([]byte, 2*len(data)) + hex.Encode(resp, data) + return resp, AuthOk +} + +// getCookie searches for the cookie identified by id in context and returns +// the cookie content or nil. (Since HandleData can't return a specific error, +// but only whether an error occured, this function also doesn't bother to +// return an error.) 
+func (a authCookieSha1) getCookie(context, id []byte) []byte { + file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) + if err != nil { + return nil + } + defer file.Close() + rd := bufio.NewReader(file) + for { + line, err := rd.ReadBytes('\n') + if err != nil { + return nil + } + line = line[:len(line)-1] + b := bytes.Split(line, []byte{' '}) + if len(b) != 3 { + return nil + } + if bytes.Equal(b[0], id) { + return b[2] + } + } +} + +// generateChallenge returns a random, hex-encoded challenge, or nil on error +// (see above). +func (a authCookieSha1) generateChallenge() []byte { + b := make([]byte, 16) + n, err := rand.Read(b) + if err != nil { + return nil + } + if n != 16 { + return nil + } + enc := make([]byte, 32) + hex.Encode(enc, b) + return enc +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/call.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/call.go new file mode 100644 index 00000000000..1d2fbc7efd8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/call.go @@ -0,0 +1,147 @@ +package dbus + +import ( + "errors" + "strings" +) + +// Call represents a pending or completed method call. +type Call struct { + Destination string + Path ObjectPath + Method string + Args []interface{} + + // Strobes when the call is complete. + Done chan *Call + + // After completion, the error status. If this is non-nil, it may be an + // error message from the peer (with Error as its type) or some other error. + Err error + + // Holds the response once the call is done. + Body []interface{} +} + +var errSignature = errors.New("dbus: mismatched signature") + +// Store stores the body of the reply into the provided pointers. It returns +// an error if the signatures of the body and retvalues don't match, or if +// the error status is not nil. +func (c *Call) Store(retvalues ...interface{}) error { + if c.Err != nil { + return c.Err + } + + return Store(c.Body, retvalues...) +} + +// Object represents a remote object on which methods can be invoked. +type Object struct { + conn *Conn + dest string + path ObjectPath +} + +// Call calls a method with (*Object).Go and waits for its reply. +func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { + return <-o.Go(method, flags, make(chan *Call, 1), args...).Done +} + +// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given +// object. The property name must be given in interface.member notation. +func (o *Object) GetProperty(p string) (Variant, error) { + idx := strings.LastIndex(p, ".") + if idx == -1 || idx+1 == len(p) { + return Variant{}, errors.New("dbus: invalid property " + p) + } + + iface := p[:idx] + prop := p[idx+1:] + + result := Variant{} + err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) + + if err != nil { + return Variant{}, err + } + + return result, nil +} + +// Go calls a method with the given arguments asynchronously. It returns a +// Call structure representing this method call. The passed channel will +// return the same value once the call is done. If ch is nil, a new channel +// will be allocated. Otherwise, ch has to be buffered or Go will panic. +// +// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure +// is returned of which only the Err member is valid. 
+// +// If the method parameter contains a dot ('.'), the part before the last dot +// specifies the interface on which the method is called. +func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + iface := "" + i := strings.LastIndex(method, ".") + if i != -1 { + iface = method[:i] + } + method = method[i+1:] + msg := new(Message) + msg.Type = TypeMethodCall + msg.serial = o.conn.getSerial() + msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldPath] = MakeVariant(o.path) + msg.Headers[FieldDestination] = MakeVariant(o.dest) + msg.Headers[FieldMember] = MakeVariant(method) + if iface != "" { + msg.Headers[FieldInterface] = MakeVariant(iface) + } + msg.Body = args + if len(args) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) + } + if msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 10) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Object).Go") + } + call := &Call{ + Destination: o.dest, + Path: o.path, + Method: method, + Args: args, + Done: ch, + } + o.conn.callsLck.Lock() + o.conn.calls[msg.serial] = call + o.conn.callsLck.Unlock() + o.conn.outLck.RLock() + if o.conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + o.conn.out <- msg + } + o.conn.outLck.RUnlock() + return call + } + o.conn.outLck.RLock() + defer o.conn.outLck.RUnlock() + if o.conn.closed { + return &Call{Err: ErrClosed} + } + o.conn.out <- msg + return &Call{Err: nil} +} + +// Destination returns the destination that calls on o are sent to. +func (o *Object) Destination() string { + return o.dest +} + +// Path returns the path that calls on o are sent to. +func (o *Object) Path() ObjectPath { + return o.path +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn.go new file mode 100644 index 00000000000..75dd22652a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn.go @@ -0,0 +1,601 @@ +package dbus + +import ( + "errors" + "io" + "os" + "reflect" + "strings" + "sync" +) + +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +var ( + systemBus *Conn + systemBusLck sync.Mutex + sessionBus *Conn + sessionBusLck sync.Mutex +) + +// ErrClosed is the error returned by calls on a closed connection. +var ErrClosed = errors.New("dbus: connection closed by user") + +// Conn represents a connection to a message bus (usually, the system or +// session bus). +// +// Connections are either shared or private. Shared connections +// are shared between calls to the functions that return them. As a result, +// the methods Close, Auth and Hello must not be called on them. +// +// Multiple goroutines may invoke methods on a connection simultaneously. 
+type Conn struct { + transport + + busObj *Object + unixFD bool + uuid string + + names []string + namesLck sync.RWMutex + + serialLck sync.Mutex + nextSerial uint32 + serialUsed map[uint32]bool + + calls map[uint32]*Call + callsLck sync.RWMutex + + handlers map[ObjectPath]map[string]interface{} + handlersLck sync.RWMutex + + out chan *Message + closed bool + outLck sync.RWMutex + + signals []chan<- *Signal + signalsLck sync.Mutex + + eavesdropped chan<- *Message + eavesdroppedLck sync.Mutex +} + +// SessionBus returns a shared connection to the session bus, connecting to it +// if not already done. +func SessionBus() (conn *Conn, err error) { + sessionBusLck.Lock() + defer sessionBusLck.Unlock() + if sessionBus != nil { + return sessionBus, nil + } + defer func() { + if conn != nil { + sessionBus = conn + } + }() + conn, err = SessionBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") + if address != "" && address != "autolaunch:" { + return Dial(address) + } + + return sessionBusPlatform() +} + +// SystemBus returns a shared connection to the system bus, connecting to it if +// not already done. +func SystemBus() (conn *Conn, err error) { + systemBusLck.Lock() + defer systemBusLck.Unlock() + if systemBus != nil { + return systemBus, nil + } + defer func() { + if conn != nil { + systemBus = conn + } + }() + conn, err = SystemBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SystemBusPrivate returns a new private connection to the system bus. +func SystemBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return Dial(address) + } + return Dial(defaultSystemBusAddress) +} + +// Dial establishes a new private connection to the message bus specified by address. +func Dial(address string) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr) +} + +// NewConn creates a new private *Conn from an already established connection. +func NewConn(conn io.ReadWriteCloser) (*Conn, error) { + return newConn(genericTransport{conn}) +} + +// newConn creates a new *Conn from a transport. +func newConn(tr transport) (*Conn, error) { + conn := new(Conn) + conn.transport = tr + conn.calls = make(map[uint32]*Call) + conn.out = make(chan *Message, 10) + conn.handlers = make(map[ObjectPath]map[string]interface{}) + conn.nextSerial = 1 + conn.serialUsed = map[uint32]bool{0: true} + conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + return conn, nil +} + +// BusObject returns the object owned by the bus daemon which handles +// administrative requests. +func (conn *Conn) BusObject() *Object { + return conn.busObj +} + +// Close closes the connection. Any blocked operations will return with errors +// and the channels passed to Eavesdrop and Signal are closed. This method must +// not be called on shared connections. 
+func (conn *Conn) Close() error { + conn.outLck.Lock() + close(conn.out) + conn.closed = true + conn.outLck.Unlock() + conn.signalsLck.Lock() + for _, ch := range conn.signals { + close(ch) + } + conn.signalsLck.Unlock() + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + close(conn.eavesdropped) + } + conn.eavesdroppedLck.Unlock() + return conn.transport.Close() +} + +// Eavesdrop causes conn to send all incoming messages to the given channel +// without further processing. Method replies, errors and signals will not be +// sent to the appropiate channels and method calls will not be handled. If nil +// is passed, the normal behaviour is restored. +// +// The caller has to make sure that ch is sufficiently buffered; +// if a message arrives when a write to ch is not possible, the message is +// discarded. +func (conn *Conn) Eavesdrop(ch chan<- *Message) { + conn.eavesdroppedLck.Lock() + conn.eavesdropped = ch + conn.eavesdroppedLck.Unlock() +} + +// getSerial returns an unused serial. +func (conn *Conn) getSerial() uint32 { + conn.serialLck.Lock() + defer conn.serialLck.Unlock() + n := conn.nextSerial + for conn.serialUsed[n] { + n++ + } + conn.serialUsed[n] = true + conn.nextSerial = n + 1 + return n +} + +// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be +// called after authentication, but before sending any other messages to the +// bus. Hello must not be called for shared connections. +func (conn *Conn) Hello() error { + var s string + err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s) + if err != nil { + return err + } + conn.namesLck.Lock() + conn.names = make([]string, 1) + conn.names[0] = s + conn.namesLck.Unlock() + return nil +} + +// inWorker runs in an own goroutine, reading incoming messages from the +// transport and dispatching them appropiately. +func (conn *Conn) inWorker() { + for { + msg, err := conn.ReadMessage() + if err == nil { + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + select { + case conn.eavesdropped <- msg: + default: + } + conn.eavesdroppedLck.Unlock() + continue + } + conn.eavesdroppedLck.Unlock() + dest, _ := msg.Headers[FieldDestination].value.(string) + found := false + if dest == "" { + found = true + } else { + conn.namesLck.RLock() + if len(conn.names) == 0 { + found = true + } + for _, v := range conn.names { + if dest == v { + found = true + break + } + } + conn.namesLck.RUnlock() + } + if !found { + // Eavesdropped a message, but no channel for it is registered. + // Ignore it. + continue + } + switch msg.Type { + case TypeMethodReply, TypeError: + serial := msg.Headers[FieldReplySerial].value.(uint32) + conn.callsLck.Lock() + if c, ok := conn.calls[serial]; ok { + if msg.Type == TypeError { + name, _ := msg.Headers[FieldErrorName].value.(string) + c.Err = Error{name, msg.Body} + } else { + c.Body = msg.Body + } + c.Done <- c + conn.serialLck.Lock() + delete(conn.serialUsed, serial) + conn.serialLck.Unlock() + delete(conn.calls, serial) + } + conn.callsLck.Unlock() + case TypeSignal: + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. 
+ sender, _ := msg.Headers[FieldSender].value.(string) + if iface == "org.freedesktop.DBus" && member == "NameLost" && + sender == "org.freedesktop.DBus" { + + name, _ := msg.Body[0].(string) + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." + member, + Body: msg.Body, + } + conn.signalsLck.Lock() + for _, ch := range conn.signals { + // don't block trying to send a signal + select { + case ch <- signal: + default: + } + } + conn.signalsLck.Unlock() + case TypeMethodCall: + go conn.handleCall(msg) + } + } else if _, ok := err.(InvalidMessageError); !ok { + // Some read error occured (usually EOF); we can't really do + // anything but to shut down all stuff and returns errors to all + // pending replies. + conn.Close() + conn.callsLck.RLock() + for _, v := range conn.calls { + v.Err = err + v.Done <- v + } + conn.callsLck.RUnlock() + return + } + // invalid messages are ignored + } +} + +// Names returns the list of all names that are currently owned by this +// connection. The slice is always at least one element long, the first element +// being the unique name of the connection. +func (conn *Conn) Names() []string { + conn.namesLck.RLock() + // copy the slice so it can't be modified + s := make([]string, len(conn.names)) + copy(s, conn.names) + conn.namesLck.RUnlock() + return s +} + +// Object returns the object identified by the given destination name and path. +func (conn *Conn) Object(dest string, path ObjectPath) *Object { + return &Object{conn, dest, path} +} + +// outWorker runs in an own goroutine, encoding and sending messages that are +// sent to conn.out. +func (conn *Conn) outWorker() { + for msg := range conn.out { + err := conn.SendMessage(msg) + conn.callsLck.RLock() + if err != nil { + if c := conn.calls[msg.serial]; c != nil { + c.Err = err + c.Done <- c + } + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } else if msg.Type != TypeMethodCall { + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } + conn.callsLck.RUnlock() + } +} + +// Send sends the given message to the message bus. You usually don't need to +// use this; use the higher-level equivalents (Call / Go, Emit and Export) +// instead. If msg is a method call and NoReplyExpected is not set, a non-nil +// call is returned and the same value is sent to ch (which must be buffered) +// once the call is complete. Otherwise, ch is ignored and a Call structure is +// returned of which only the Err member is valid. +func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { + var call *Call + + msg.serial = conn.getSerial() + if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 5) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } + call = new(Call) + call.Destination, _ = msg.Headers[FieldDestination].value.(string) + call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) + iface, _ := msg.Headers[FieldInterface].value.(string) + member, _ := msg.Headers[FieldMember].value.(string) + call.Method = iface + "." 
+ member + call.Args = msg.Body + call.Done = ch + conn.callsLck.Lock() + conn.calls[msg.serial] = call + conn.callsLck.Unlock() + conn.outLck.RLock() + if conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + conn.out <- msg + } + conn.outLck.RUnlock() + } else { + conn.outLck.RLock() + if conn.closed { + call = &Call{Err: ErrClosed} + } else { + conn.out <- msg + call = &Call{Err: nil} + } + conn.outLck.RUnlock() + } + return call +} + +// sendError creates an error message corresponding to the parameters and sends +// it to conn.out. +func (conn *Conn) sendError(e Error, dest string, serial uint32) { + msg := new(Message) + msg.Type = TypeError + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldErrorName] = MakeVariant(e.Name) + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = e.Body + if len(e.Body) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// sendReply creates a method reply message corresponding to the parameters and +// sends it to conn.out. +func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { + msg := new(Message) + msg.Type = TypeMethodReply + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// Signal registers the given channel to be passed all received signal messages. +// The caller has to make sure that ch is sufficiently buffered; if a message +// arrives when a write to c is not possible, it is discarded. +// +// Multiple of these channels can be registered at the same time. Passing a +// channel that already is registered will remove it from the list of the +// registered channels. +// +// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a +// channel for eavesdropped messages, this channel receives all signals, and +// none of the channels passed to Signal will receive any signals. +func (conn *Conn) Signal(ch chan<- *Signal) { + conn.signalsLck.Lock() + conn.signals = append(conn.signals, ch) + conn.signalsLck.Unlock() +} + +// SupportsUnixFDs returns whether the underlying transport supports passing of +// unix file descriptors. If this is false, method calls containing unix file +// descriptors will return an error and emitted signals containing them will +// not be sent. +func (conn *Conn) SupportsUnixFDs() bool { + return conn.unixFD +} + +// Error represents a D-Bus message of type Error. +type Error struct { + Name string + Body []interface{} +} + +func (e Error) Error() string { + if len(e.Body) >= 1 { + s, ok := e.Body[0].(string) + if ok { + return s + } + } + return e.Name +} + +// Signal represents a D-Bus message of type Signal. The name member is given in +// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost. +type Signal struct { + Sender string + Path ObjectPath + Name string + Body []interface{} +} + +// transport is a D-Bus transport. +type transport interface { + // Read and Write raw data (for example, for the authentication protocol). 
+ io.ReadWriteCloser + + // Send the initial null byte used for the EXTERNAL mechanism. + SendNullByte() error + + // Returns whether this transport supports passing Unix FDs. + SupportsUnixFDs() bool + + // Signal the transport that Unix FD passing is enabled for this connection. + EnableUnixFDs() + + // Read / send a message, handling things like Unix FDs. + ReadMessage() (*Message, error) + SendMessage(*Message) error +} + +func getTransport(address string) (transport, error) { + var err error + var t transport + + m := map[string]func(string) (transport, error){ + "unix": newUnixTransport, + } + addresses := strings.Split(address, ";") + for _, v := range addresses { + i := strings.IndexRune(v, ':') + if i == -1 { + err = errors.New("dbus: invalid bus address (no transport)") + continue + } + f := m[v[:i]] + if f == nil { + err = errors.New("dbus: invalid bus address (invalid or unsupported transport)") + } + t, err = f(v[i+1:]) + if err == nil { + return t, nil + } + } + return nil, err +} + +// dereferenceAll returns a slice that, assuming that vs is a slice of pointers +// of arbitrary types, containes the values that are obtained from dereferencing +// all elements in vs. +func dereferenceAll(vs []interface{}) []interface{} { + for i := range vs { + v := reflect.ValueOf(vs[i]) + v = v.Elem() + vs[i] = v.Interface() + } + return vs +} + +// getKey gets a key from a the list of keys. Returns "" on error / not found... +func getKey(s, key string) string { + i := strings.Index(s, key) + if i == -1 { + return "" + } + if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { + return "" + } + j := strings.Index(s, ",") + if j == -1 { + j = len(s) + } + return s[i+len(key)+1 : j] +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_darwin.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_darwin.go new file mode 100644 index 00000000000..b67bb1b81da --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_darwin.go @@ -0,0 +1,21 @@ +package dbus + +import ( + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial("unix:path=" + string(b[:len(b)-1])) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_other.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_other.go new file mode 100644 index 00000000000..f74b8758d44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_other.go @@ -0,0 +1,27 @@ +// +build !darwin + +package dbus + +import ( + "bytes" + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("dbus-launch") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + i := bytes.IndexByte(b, '=') + j := bytes.IndexByte(b, '\n') + + if i == -1 || j == -1 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial(string(b[i+1 : j])) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_test.go 
b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_test.go new file mode 100644 index 00000000000..a2b14e8cc46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/conn_test.go @@ -0,0 +1,199 @@ +package dbus + +import "testing" + +func TestSessionBus(t *testing.T) { + _, err := SessionBus() + if err != nil { + t.Error(err) + } +} + +func TestSystemBus(t *testing.T) { + _, err := SystemBus() + if err != nil { + t.Error(err) + } +} + +func TestSend(t *testing.T) { + bus, err := SessionBus() + if err != nil { + t.Error(err) + } + ch := make(chan *Call, 1) + msg := &Message{ + Type: TypeMethodCall, + Flags: 0, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant(bus.Names()[0]), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: MakeVariant("org.freedesktop.DBus.Peer"), + FieldMember: MakeVariant("Ping"), + }, + } + call := bus.Send(msg, ch) + <-ch + if call.Err != nil { + t.Error(call.Err) + } +} + +type server struct{} + +func (server) Double(i int64) (int64, *Error) { + return 2 * i, nil +} + +func BenchmarkCall(b *testing.B) { + b.StopTimer() + var s string + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + b.StartTimer() + for i := 0; i < b.N; i++ { + err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&s) + if err != nil { + b.Fatal(err) + } + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } +} + +func BenchmarkCallAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Error(v.Err) + } + s := v.Body[0].(string) + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name) + } + <-done +} + +func BenchmarkServe(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServe(b, srv, cli) +} + +func BenchmarkServeAsync(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServeAsync(b, srv, cli) +} + +func BenchmarkServeSameConn(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServe(b, bus, bus) +} + +func BenchmarkServeSameConnAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServeAsync(b, bus, bus) +} + +func benchmarkServe(b *testing.B, srv, cli *Conn) { + var r int64 + var err error + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + b.StartTimer() + for i := 0; i < b.N; i++ { + err = obj.Call("org.guelfey.DBus.Test.Double", 0, int64(i)).Store(&r) + if err != nil { + b.Fatal(err) + } + if r != 2*int64(i) { + 
b.Errorf("got %d, wanted %d", r, 2*int64(i)) + } + } +} + +func benchmarkServeAsync(b *testing.B, srv, cli *Conn) { + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Fatal(v.Err) + } + i, r := v.Args[0].(int64), v.Body[0].(int64) + if 2*i != r { + b.Errorf("got %d, wanted %d", r, 2*i) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.guelfey.DBus.Test.Double", 0, c, int64(i)) + } + <-done +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/dbus.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/dbus.go new file mode 100644 index 00000000000..2ce68735cdf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/dbus.go @@ -0,0 +1,258 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" +) + +var ( + byteType = reflect.TypeOf(byte(0)) + boolType = reflect.TypeOf(false) + uint8Type = reflect.TypeOf(uint8(0)) + int16Type = reflect.TypeOf(int16(0)) + uint16Type = reflect.TypeOf(uint16(0)) + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + signatureType = reflect.TypeOf(Signature{""}) + objectPathType = reflect.TypeOf(ObjectPath("")) + variantType = reflect.TypeOf(Variant{Signature{""}, nil}) + interfacesType = reflect.TypeOf([]interface{}{}) + unixFDType = reflect.TypeOf(UnixFD(0)) + unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) +) + +// An InvalidTypeError signals that a value which cannot be represented in the +// D-Bus wire format was passed to a function. +type InvalidTypeError struct { + Type reflect.Type +} + +func (e InvalidTypeError) Error() string { + return "dbus: invalid type " + e.Type.String() +} + +// Store copies the values contained in src to dest, which must be a slice of +// pointers. It converts slices of interfaces from src to corresponding structs +// in dest. An error is returned if the lengths of src and dest or the types of +// their elements don't match. +func Store(src []interface{}, dest ...interface{}) error { + if len(src) != len(dest) { + return errors.New("dbus.Store: length mismatch") + } + + for i := range src { + if err := store(src[i], dest[i]); err != nil { + return err + } + } + return nil +} + +func store(src, dest interface{}) error { + if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { + reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) + return nil + } else if hasStruct(dest) { + rv := reflect.ValueOf(dest).Elem() + switch rv.Kind() { + case reflect.Struct: + vs, ok := src.([]interface{}) + if !ok { + return errors.New("dbus.Store: type mismatch") + } + t := rv.Type() + ndest := make([]interface{}, 0, rv.NumField()) + for i := 0; i < rv.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + ndest = append(ndest, rv.Field(i).Addr().Interface()) + } + } + if len(vs) != len(ndest) { + return errors.New("dbus.Store: type mismatch") + } + err := Store(vs, ndest...) 
+ if err != nil { + return errors.New("dbus.Store: type mismatch") + } + case reflect.Slice: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Slice { + return errors.New("dbus.Store: type mismatch") + } + rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) + for i := 0; i < sv.Len(); i++ { + if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { + return err + } + } + case reflect.Map: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Map { + return errors.New("dbus.Store: type mismatch") + } + keys := sv.MapKeys() + rv.Set(reflect.MakeMap(sv.Type())) + for _, key := range keys { + v := reflect.New(sv.Type().Elem()) + if err := store(v, sv.MapIndex(key).Interface()); err != nil { + return err + } + rv.SetMapIndex(key, v.Elem()) + } + default: + return errors.New("dbus.Store: type mismatch") + } + return nil + } else { + return errors.New("dbus.Store: type mismatch") + } +} + +func hasStruct(v interface{}) bool { + t := reflect.TypeOf(v) + for { + switch t.Kind() { + case reflect.Struct: + return true + case reflect.Slice, reflect.Ptr, reflect.Map: + t = t.Elem() + default: + return false + } + } +} + +// An ObjectPath is an object path as defined by the D-Bus spec. +type ObjectPath string + +// IsValid returns whether the object path is valid. +func (o ObjectPath) IsValid() bool { + s := string(o) + if len(s) == 0 { + return false + } + if s[0] != '/' { + return false + } + if s[len(s)-1] == '/' && len(s) != 1 { + return false + } + // probably not used, but technically possible + if s == "/" { + return true + } + split := strings.Split(s[1:], "/") + for _, v := range split { + if len(v) == 0 { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// A UnixFD is a Unix file descriptor sent over the wire. See the package-level +// documentation for more information about Unix file descriptor passsing. +type UnixFD int32 + +// A UnixFDIndex is the representation of a Unix file descriptor in a message. +type UnixFDIndex uint32 + +// alignment returns the alignment of values of type t. +func alignment(t reflect.Type) int { + switch t { + case variantType: + return 1 + case objectPathType: + return 4 + case signatureType: + return 1 + case interfacesType: // sometimes used for structs + return 8 + } + switch t.Kind() { + case reflect.Uint8: + return 1 + case reflect.Uint16, reflect.Int16: + return 2 + case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + return 4 + case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: + return 8 + case reflect.Ptr: + return alignment(t.Elem()) + } + return 1 +} + +// isKeyType returns whether t is a valid type for a D-Bus dict. +func isKeyType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, + reflect.String: + + return true + } + return false +} + +// isValidInterface returns whether s is a valid name for an interface. +func isValidInterface(s string) bool { + if len(s) == 0 || len(s) > 255 || s[0] == '.' { + return false + } + elem := strings.Split(s, ".") + if len(elem) < 2 { + return false + } + for _, v := range elem { + if len(v) == 0 { + return false + } + if v[0] >= '0' && v[0] <= '9' { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// isValidMember returns whether s is a valid name for a member. 
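An illustrative sketch (not part of the vendored files) of the Store function documented above: an incoming D-Bus STRUCT arrives as a []interface{}, and Store copies it into a matching Go struct passed by pointer. The user type and its values are invented for the example.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

// user is a placeholder type; any exported, D-Bus-representable fields work.
type user struct {
    Name string
    UID  uint32
}

func main() {
    // One STRUCT value in the source slice, one pointer in the destination.
    body := []interface{}{[]interface{}{"alice", uint32(1000)}}

    var u user
    if err := dbus.Store(body, &u); err != nil {
        panic(err)
    }
    fmt.Println(u.Name, u.UID) // alice 1000
}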
+func isValidMember(s string) bool { + if len(s) == 0 || len(s) > 255 { + return false + } + i := strings.Index(s, ".") + if i != -1 { + return false + } + if s[0] >= '0' && s[0] <= '9' { + return false + } + for _, c := range s { + if !isMemberChar(c) { + return false + } + } + return true +} + +func isMemberChar(c rune) bool { + return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || c == '_' +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/decoder.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/decoder.go new file mode 100644 index 00000000000..ef50dcab98d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/decoder.go @@ -0,0 +1,228 @@ +package dbus + +import ( + "encoding/binary" + "io" + "reflect" +) + +type decoder struct { + in io.Reader + order binary.ByteOrder + pos int +} + +// newDecoder returns a new decoder that reads values from in. The input is +// expected to be in the given byte order. +func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { + dec := new(decoder) + dec.in = in + dec.order = order + return dec +} + +// align aligns the input to the given boundary and panics on error. +func (dec *decoder) align(n int) { + if dec.pos%n != 0 { + newpos := (dec.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-dec.pos) + if _, err := io.ReadFull(dec.in, empty); err != nil { + panic(err) + } + dec.pos = newpos + } +} + +// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. +func (dec *decoder) binread(v interface{}) { + if err := binary.Read(dec.in, dec.order, v); err != nil { + panic(err) + } +} + +func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { + defer func() { + var ok bool + v := recover() + if err, ok = v.(error); ok { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = FormatError("unexpected EOF") + } + } + }() + vs = make([]interface{}, 0) + s := sig.str + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + return nil, err + } + v := dec.decode(s[:len(s)-len(rem)], 0) + vs = append(vs, v) + s = rem + } + return vs, nil +} + +func (dec *decoder) decode(s string, depth int) interface{} { + dec.align(alignment(typeFor(s))) + switch s[0] { + case 'y': + var b [1]byte + if _, err := dec.in.Read(b[:]); err != nil { + panic(err) + } + dec.pos++ + return b[0] + case 'b': + i := dec.decode("u", depth).(uint32) + switch { + case i == 0: + return false + case i == 1: + return true + default: + panic(FormatError("invalid value for boolean")) + } + case 'n': + var i int16 + dec.binread(&i) + dec.pos += 2 + return i + case 'i': + var i int32 + dec.binread(&i) + dec.pos += 4 + return i + case 'x': + var i int64 + dec.binread(&i) + dec.pos += 8 + return i + case 'q': + var i uint16 + dec.binread(&i) + dec.pos += 2 + return i + case 'u': + var i uint32 + dec.binread(&i) + dec.pos += 4 + return i + case 't': + var i uint64 + dec.binread(&i) + dec.pos += 8 + return i + case 'd': + var f float64 + dec.binread(&f) + dec.pos += 8 + return f + case 's': + length := dec.decode("u", depth).(uint32) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + return string(b[:len(b)-1]) + case 'o': + return ObjectPath(dec.decode("s", depth).(string)) + case 'g': + length := dec.decode("y", depth).(byte) + b := make([]byte, int(length)+1) + if _, err := 
io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + sig, err := ParseSignature(string(b[:len(b)-1])) + if err != nil { + panic(err) + } + return sig + case 'v': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var variant Variant + sig := dec.decode("g", depth).(Signature) + if len(sig.str) == 0 { + panic(FormatError("variant signature is empty")) + } + err, rem := validSingle(sig.str, 0) + if err != nil { + panic(err) + } + if rem != "" { + panic(FormatError("variant signature has multiple types")) + } + variant.sig = sig + variant.value = dec.decode(sig.str, depth+1) + return variant + case 'h': + return UnixFDIndex(dec.decode("u", depth).(uint32)) + case 'a': + if len(s) > 1 && s[1] == '{' { + ksig := s[2:3] + vsig := s[3 : len(s)-1] + v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) + if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + // Even for empty maps, the correct padding must be included + dec.align(8) + spos := dec.pos + for dec.pos < spos+int(length) { + dec.align(8) + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + kv := dec.decode(ksig, depth+2) + vv := dec.decode(vsig, depth+2) + v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return v.Interface() + } + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) + // Even for empty arrays, the correct padding must be included + dec.align(alignment(typeFor(s[1:]))) + spos := dec.pos + for dec.pos < spos+int(length) { + ev := dec.decode(s[1:], depth+1) + v = reflect.Append(v, reflect.ValueOf(ev)) + } + return v.Interface() + case '(': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + dec.align(8) + v := make([]interface{}, 0) + s = s[1 : len(s)-1] + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + panic(err) + } + ev := dec.decode(s[:len(s)-len(rem)], depth+1) + v = append(v, ev) + s = rem + } + return v + default: + panic(SignatureError{Sig: s}) + } +} + +// A FormatError is an error in the wire format. +type FormatError string + +func (e FormatError) Error() string { + return "dbus: wire format error: " + string(e) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/doc.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/doc.go new file mode 100644 index 00000000000..deff554a381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/doc.go @@ -0,0 +1,63 @@ +/* +Package dbus implements bindings to the D-Bus message bus system. + +To use the message bus API, you first need to connect to a bus (usually the +session or system bus). The acquired connection then can be used to call methods +on remote objects and emit or receive signals. Using the Export method, you can +arrange D-Bus methods calls to be directly translated to method calls on a Go +value. + +Conversion Rules + +For outgoing messages, Go types are automatically converted to the +corresponding D-Bus types. 
The following types are directly encoded as their +respective D-Bus equivalents: + + Go type | D-Bus type + ------------+----------- + byte | BYTE + bool | BOOLEAN + int16 | INT16 + uint16 | UINT16 + int32 | INT32 + uint32 | UINT32 + int64 | INT64 + uint64 | UINT64 + float64 | DOUBLE + string | STRING + ObjectPath | OBJECT_PATH + Signature | SIGNATURE + Variant | VARIANT + UnixFDIndex | UNIX_FD + +Slices and arrays encode as ARRAYs of their element type. + +Maps encode as DICTs, provided that their key type can be used as a key for +a DICT. + +Structs other than Variant and Signature encode as a STRUCT containing their +exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will +be skipped. + +Pointers encode as the value they're pointed to. + +Trying to encode any other type or a slice, map or struct containing an +unsupported type will result in an InvalidTypeError. + +For incoming messages, the inverse of these rules are used, with the exception +of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces +containing the struct fields in the correct order. The Store function can be +used to convert such values to Go structs. + +Unix FD passing + +Handling Unix file descriptors deserves special mention. To use them, you should +first check that they are supported on a connection by calling SupportsUnixFDs. +If it returns true, all method of Connection will translate messages containing +UnixFD's to messages that are accompanied by the given file descriptors with the +UnixFD values being substituted by the correct indices. Similarily, the indices +of incoming messages are automatically resolved. It shouldn't be necessary to use +UnixFDIndex. + +*/ +package dbus diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/encoder.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/encoder.go new file mode 100644 index 00000000000..f9d2f057160 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/encoder.go @@ -0,0 +1,179 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io" + "reflect" +) + +// An encoder encodes values to the D-Bus wire format. +type encoder struct { + out io.Writer + order binary.ByteOrder + pos int +} + +// NewEncoder returns a new encoder that writes to out in the given byte order. +func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { + enc := new(encoder) + enc.out = out + enc.order = order + return enc +} + +// Aligns the next output to be on a multiple of n. Panics on write errors. +func (enc *encoder) align(n int) { + if enc.pos%n != 0 { + newpos := (enc.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-enc.pos) + if _, err := enc.out.Write(empty); err != nil { + panic(err) + } + enc.pos = newpos + } +} + +// Calls binary.Write(enc.out, enc.order, v) and panics on write errors. +func (enc *encoder) binwrite(v interface{}) { + if err := binary.Write(enc.out, enc.order, v); err != nil { + panic(err) + } +} + +// Encode encodes the given values to the underyling reader. All written values +// are aligned properly as required by the D-Bus spec. +func (enc *encoder) Encode(vs ...interface{}) (err error) { + defer func() { + err, _ = recover().(error) + }() + for _, v := range vs { + enc.encode(reflect.ValueOf(v), 0) + } + return nil +} + +// encode encodes the given value to the writer and panics on error. depth holds +// the depth of the container nesting. 
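As a quick editorial illustration of the conversion rules described in doc.go above (not part of the vendored code), SignatureOf reports the D-Bus signature that a set of Go values would be encoded with; the profile type and its tag are invented for the example.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

// profile is a placeholder struct; the `dbus:"-"` tag marks a field that is
// skipped during encoding, per the rules above.
type profile struct {
    Name   string
    Age    uint32
    Secret string `dbus:"-"`
}

func main() {
    // Basic types map one-to-one onto D-Bus types.
    fmt.Println(dbus.SignatureOf(uint32(7), "hi", true).String()) // usb

    // Slices become ARRAYs, maps become DICTs, structs become STRUCTs.
    fmt.Println(dbus.SignatureOf([]int32{1, 2, 3}).String())           // ai
    fmt.Println(dbus.SignatureOf(map[string]bool{"x": true}).String()) // a{sb}
    fmt.Println(dbus.SignatureOf(profile{}).String())                  // (su)

    // Variants carry their own signature alongside the value.
    v := dbus.MakeVariant(profile{Name: "alice", Age: 30})
    fmt.Println(v.Signature().String(), v.Value())
}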
+func (enc *encoder) encode(v reflect.Value, depth int) { + enc.align(alignment(v.Type())) + switch v.Kind() { + case reflect.Uint8: + var b [1]byte + b[0] = byte(v.Uint()) + if _, err := enc.out.Write(b[:]); err != nil { + panic(err) + } + enc.pos++ + case reflect.Bool: + if v.Bool() { + enc.encode(reflect.ValueOf(uint32(1)), depth) + } else { + enc.encode(reflect.ValueOf(uint32(0)), depth) + } + case reflect.Int16: + enc.binwrite(int16(v.Int())) + enc.pos += 2 + case reflect.Uint16: + enc.binwrite(uint16(v.Uint())) + enc.pos += 2 + case reflect.Int32: + enc.binwrite(int32(v.Int())) + enc.pos += 4 + case reflect.Uint32: + enc.binwrite(uint32(v.Uint())) + enc.pos += 4 + case reflect.Int64: + enc.binwrite(v.Int()) + enc.pos += 8 + case reflect.Uint64: + enc.binwrite(v.Uint()) + enc.pos += 8 + case reflect.Float64: + enc.binwrite(v.Float()) + enc.pos += 8 + case reflect.String: + enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + b := make([]byte, v.Len()+1) + copy(b, v.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case reflect.Ptr: + enc.encode(v.Elem(), depth) + case reflect.Slice, reflect.Array: + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + + for i := 0; i < v.Len(); i++ { + bufenc.encode(v.Index(i), depth+1) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(alignment(v.Type().Elem())) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Struct: + if depth >= 64 && v.Type() != signatureType { + panic(FormatError("input exceeds container depth limit")) + } + switch t := v.Type(); t { + case signatureType: + str := v.Field(0) + enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + b := make([]byte, str.Len()+1) + copy(b, str.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case variantType: + variant := v.Interface().(Variant) + enc.encode(reflect.ValueOf(variant.sig), depth+1) + enc.encode(reflect.ValueOf(variant.value), depth+1) + default: + for i := 0; i < v.Type().NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + enc.encode(v.Field(i), depth+1) + } + } + } + case reflect.Map: + // Maps are arrays of structures, so they actually increase the depth by + // 2. 
+ if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + keys := v.MapKeys() + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + for _, k := range keys { + bufenc.align(8) + bufenc.encode(k, depth+2) + bufenc.encode(v.MapIndex(k), depth+2) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(8) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + default: + panic(InvalidTypeError{v.Type()}) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/examples_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/examples_test.go new file mode 100644 index 00000000000..0218ac55986 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/examples_test.go @@ -0,0 +1,50 @@ +package dbus + +import "fmt" + +func ExampleConn_Emit() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + conn.Emit("/foo/bar", "foo.bar.Baz", uint32(0xDAEDBEEF)) +} + +func ExampleObject_Call() { + var list []string + + conn, err := SessionBus() + if err != nil { + panic(err) + } + + err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&list) + if err != nil { + panic(err) + } + for _, v := range list { + fmt.Println(v) + } +} + +func ExampleObject_Go() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + ch := make(chan *Call, 10) + conn.BusObject().Go("org.freedesktop.DBus.ListActivatableNames", 0, ch) + select { + case call := <-ch: + if call.Err != nil { + panic(err) + } + list := call.Body[0].([]string) + for _, v := range list { + fmt.Println(v) + } + // put some other cases here + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/export.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/export.go new file mode 100644 index 00000000000..1dd15915280 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/export.go @@ -0,0 +1,302 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" + "unicode" +) + +var ( + errmsgInvalidArg = Error{ + "org.freedesktop.DBus.Error.InvalidArgs", + []interface{}{"Invalid type / number of args"}, + } + errmsgNoObject = Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{"No such object"}, + } + errmsgUnknownMethod = Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{"Unknown / invalid method"}, + } +) + +// Sender is a type which can be used in exported methods to receive the message +// sender. +type Sender string + +func exportedMethod(v interface{}, name string) reflect.Value { + if v == nil { + return reflect.Value{} + } + m := reflect.ValueOf(v).MethodByName(name) + if !m.IsValid() { + return reflect.Value{} + } + t := m.Type() + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + + return reflect.Value{} + } + return m +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). 
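The dispatch logic above (exportedMethod and handleCall) only considers methods whose last return value is *dbus.Error; a parameter of type dbus.Sender is filled in with the caller's bus name instead of being decoded from the message body. A hedged sketch of a conforming service, with all names invented:

package main

import "github.com/godbus/dbus"

// greeter is a placeholder service type.
type greeter struct{}

// Hello takes the sender plus one string argument and returns one string.
func (greeter) Hello(sender dbus.Sender, name string) (string, *dbus.Error) {
    return "hello " + name + ", you are " + string(sender), nil
}

// Fail shows how a non-nil *dbus.Error is sent back to the caller as a D-Bus error.
func (greeter) Fail() *dbus.Error {
    return &dbus.Error{Name: "org.example.Error.Failed", Body: []interface{}{"nope"}}
}

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    // Object path and interface name are placeholders.
    if err := conn.Export(greeter{}, "/org/example/Greeter", "org.example.Greeter"); err != nil {
        panic(err)
    }
    select {} // keep serving incoming calls
}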
+func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(errmsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 || unicode.IsLower([]rune(name)[0]) { + conn.sendError(errmsgUnknownMethod, sender, serial) + } + var m reflect.Value + if hasIface { + conn.handlersLck.RLock() + obj, ok := conn.handlers[path] + if !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + iface := obj[ifaceName] + conn.handlersLck.RUnlock() + m = exportedMethod(iface, name) + } else { + conn.handlersLck.RLock() + if _, ok := conn.handlers[path]; !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + for _, v := range conn.handlers[path] { + m = exportedMethod(v, name) + if m.IsValid() { + break + } + } + conn.handlersLck.RUnlock() + } + if !m.IsValid() { + conn.sendError(errmsgUnknownMethod, sender, serial) + return + } + t := m.Type() + vs := msg.Body + pointers := make([]interface{}, t.NumIn()) + decode := make([]interface{}, 0, len(vs)) + for i := 0; i < t.NumIn(); i++ { + tp := t.In(i) + val := reflect.New(tp) + pointers[i] = val.Interface() + if tp == reflect.TypeOf((*Sender)(nil)).Elem() { + val.Elem().SetString(sender) + } else { + decode = append(decode, pointers[i]) + } + } + if len(decode) != len(vs) { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + if err := Store(vs, decode...); err != nil { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + params := make([]reflect.Value, len(pointers)) + for i := 0; i < len(pointers); i++ { + params[i] = reflect.ValueOf(pointers[i]).Elem() + } + ret := m.Call(params) + if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { + conn.sendError(*em, sender, serial) + return + } + if msg.Flags&FlagNoReplyExpected == 0 { + reply := new(Message) + reply.Type = TypeMethodReply + reply.serial = conn.getSerial() + reply.Headers = make(map[HeaderField]Variant) + if hasSender { + reply.Headers[FieldDestination] = msg.Headers[FieldSender] + } + reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) + reply.Body = make([]interface{}, len(ret)-1) + for i := 0; i < len(ret)-1; i++ { + reply.Body[i] = ret[i].Interface() + } + if len(ret) != 1 { + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- reply + } + conn.outLck.RUnlock() + } +} + +// Emit emits the given signal on the message bus. The name parameter must be +// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". 
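A small sketch (editorial, with placeholder names) combining Emit, documented just above, with the Signal channel registration from conn.go earlier in this diff. Whether a connection actually receives a given signal also depends on the match rules installed on the bus, which this sketch does not set up.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }

    // Use a buffered channel; signals that cannot be written are discarded.
    ch := make(chan *dbus.Signal, 16)
    conn.Signal(ch)

    // Emit a signal; the name is split on its last dot into interface and member.
    if err := conn.Emit("/org/example/Thing", "org.example.Thing.Changed", uint32(42)); err != nil {
        panic(err)
    }

    // Print any signals routed to this connection.
    for sig := range ch {
        fmt.Println(sig.Path, sig.Name, sig.Body)
    }
}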
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { + if !path.IsValid() { + return errors.New("dbus: invalid object path") + } + i := strings.LastIndex(name, ".") + if i == -1 { + return errors.New("dbus: invalid method name") + } + iface := name[:i] + member := name[i+1:] + if !isValidMember(member) { + return errors.New("dbus: invalid method name") + } + if !isValidInterface(iface) { + return errors.New("dbus: invalid interface name") + } + msg := new(Message) + msg.Type = TypeSignal + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldInterface] = MakeVariant(iface) + msg.Headers[FieldMember] = MakeVariant(member) + msg.Headers[FieldPath] = MakeVariant(path) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + defer conn.outLck.RUnlock() + if conn.closed { + return ErrClosed + } + conn.out <- msg + return nil +} + +// Export registers the given value to be exported as an object on the +// message bus. +// +// If a method call on the given path and interface is received, an exported +// method with the same name is called with v as the receiver if the +// parameters match and the last return value is of type *Error. If this +// *Error is not nil, it is sent back to the caller as an error. +// Otherwise, a method reply is sent with the other return values as its body. +// +// Any parameters with the special type Sender are set to the sender of the +// dbus message when the method is called. Parameters of this type do not +// contribute to the dbus signature of the method (i.e. the method is exposed +// as if the parameters of type Sender were not there). +// +// Every method call is executed in a new goroutine, so the method may be called +// in multiple goroutines at once. +// +// Method calls on the interface org.freedesktop.DBus.Peer will be automatically +// handled for every object. +// +// Passing nil as the first parameter will cause conn to cease handling calls on +// the given combination of path and interface. +// +// Export returns an error if path is not a valid path name. +func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { + if !path.IsValid() { + return errors.New("dbus: invalid path name") + } + conn.handlersLck.Lock() + if v == nil { + if _, ok := conn.handlers[path]; ok { + delete(conn.handlers[path], iface) + if len(conn.handlers[path]) == 0 { + delete(conn.handlers, path) + } + } + return nil + } + if _, ok := conn.handlers[path]; !ok { + conn.handlers[path] = make(map[string]interface{}) + } + conn.handlers[path][iface] = v + conn.handlersLck.Unlock() + return nil +} + +// ReleaseName calls org.freedesktop.DBus.ReleaseName. You should use only this +// method to release a name (see below). +func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(ReleaseNameReplyReleased) { + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + return ReleaseNameReply(r), nil +} + +// RequestName calls org.freedesktop.DBus.RequestName. You should use only this +// method to request a name because package dbus needs to keep track of all +// names that the connection has. 
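A short illustration (not part of the vendored code; the well-known name is a placeholder) of claiming and releasing a bus name through RequestName and ReleaseName so the package can keep its name bookkeeping consistent:

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }

    // Ask for a well-known name without queueing behind the current owner.
    reply, err := conn.RequestName("org.example.Demo", dbus.NameFlagDoNotQueue)
    if err != nil {
        panic(err)
    }
    if reply != dbus.RequestNameReplyPrimaryOwner {
        fmt.Println("name already taken")
        return
    }

    // ... export objects and serve requests here ...

    // Hand the name back when finished.
    if _, err := conn.ReleaseName("org.example.Demo"); err != nil {
        panic(err)
    }
}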
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(RequestNameReplyPrimaryOwner) { + conn.namesLck.Lock() + conn.names = append(conn.names, name) + conn.namesLck.Unlock() + } + return RequestNameReply(r), nil +} + +// ReleaseNameReply is the reply to a ReleaseName call. +type ReleaseNameReply uint32 + +const ( + ReleaseNameReplyReleased ReleaseNameReply = 1 + iota + ReleaseNameReplyNonExistent + ReleaseNameReplyNotOwner +) + +// RequestNameFlags represents the possible flags for a RequestName call. +type RequestNameFlags uint32 + +const ( + NameFlagAllowReplacement RequestNameFlags = 1 << iota + NameFlagReplaceExisting + NameFlagDoNotQueue +) + +// RequestNameReply is the reply to a RequestName call. +type RequestNameReply uint32 + +const ( + RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota + RequestNameReplyInQueue + RequestNameReplyExists + RequestNameReplyAlreadyOwner +) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir.go new file mode 100644 index 00000000000..0b745f9313a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir.go @@ -0,0 +1,28 @@ +package dbus + +import ( + "os" + "sync" +) + +var ( + homeDir string + homeDirLock sync.Mutex +) + +func getHomeDir() string { + homeDirLock.Lock() + defer homeDirLock.Unlock() + + if homeDir != "" { + return homeDir + } + + homeDir = os.Getenv("HOME") + if homeDir != "" { + return homeDir + } + + homeDir = lookupHomeDir() + return homeDir +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_dynamic.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_dynamic.go new file mode 100644 index 00000000000..2732081e73b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_dynamic.go @@ -0,0 +1,15 @@ +// +build !static_build + +package dbus + +import ( + "os/user" +) + +func lookupHomeDir() string { + u, err := user.Current() + if err != nil { + return "/" + } + return u.HomeDir +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_static.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_static.go new file mode 100644 index 00000000000..b9d9cb5525a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/homedir_static.go @@ -0,0 +1,45 @@ +// +build static_build + +package dbus + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +func lookupHomeDir() string { + myUid := os.Getuid() + + f, err := os.Open("/etc/passwd") + if err != nil { + return "/" + } + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + if err := s.Err(); err != nil { + break + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + + if len(parts) >= 6 { + uid, err := strconv.Atoi(parts[2]) + if err == nil && uid == myUid { + return parts[5] + } + } + } + + // Default to / if we can't get a better value + return "/" +} diff --git 
a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/call.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/call.go new file mode 100644 index 00000000000..4aca2ea63e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/call.go @@ -0,0 +1,27 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "strings" +) + +// Call calls org.freedesktop.Introspectable.Introspect on a remote object +// and returns the introspection data. +func Call(o *dbus.Object) (*Node, error) { + var xmldata string + var node Node + + err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata) + if err != nil { + return nil, err + } + err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node) + if err != nil { + return nil, err + } + if node.Name == "" { + node.Name = string(o.Path()) + } + return &node, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspect.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspect.go new file mode 100644 index 00000000000..dafcdb8b7ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspect.go @@ -0,0 +1,80 @@ +// Package introspect provides some utilities for dealing with the DBus +// introspection format. +package introspect + +import "encoding/xml" + +// The introspection data for the org.freedesktop.DBus.Introspectable interface. +var IntrospectData = Interface{ + Name: "org.freedesktop.DBus.Introspectable", + Methods: []Method{ + { + Name: "Introspect", + Args: []Arg{ + {"out", "s", "out"}, + }, + }, + }, +} + +// The introspection data for the org.freedesktop.DBus.Introspectable interface, +// as a string. +const IntrospectDataString = ` + + + + + +` + +// Node is the root element of an introspection. +type Node struct { + XMLName xml.Name `xml:"node"` + Name string `xml:"name,attr,omitempty"` + Interfaces []Interface `xml:"interface"` + Children []Node `xml:"node,omitempty"` +} + +// Interface describes a DBus interface that is available on the message bus. +type Interface struct { + Name string `xml:"name,attr"` + Methods []Method `xml:"method"` + Signals []Signal `xml:"signal"` + Properties []Property `xml:"property"` + Annotations []Annotation `xml:"annotation"` +} + +// Method describes a Method on an Interface as retured by an introspection. +type Method struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Signal describes a Signal emitted on an Interface. +type Signal struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Property describes a property of an Interface. +type Property struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Access string `xml:"access,attr"` + Annotations []Annotation `xml:"annotation"` +} + +// Arg represents an argument of a method or a signal. +type Arg struct { + Name string `xml:"name,attr,omitempty"` + Type string `xml:"type,attr"` + Direction string `xml:"direction,attr,omitempty"` +} + +// Annotation is an annotation in the introspection format. 
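An editorial sketch of the introspect.Call helper defined above; it targets the bus daemon's own object, so only standard names are involved:

package main

import (
    "fmt"

    "github.com/godbus/dbus"
    "github.com/godbus/dbus/introspect"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }

    // Fetch and parse the introspection XML of the message bus itself.
    node, err := introspect.Call(conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus"))
    if err != nil {
        panic(err)
    }
    for _, iface := range node.Interfaces {
        fmt.Println(iface.Name, "-", len(iface.Methods), "methods")
    }
}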
+type Annotation struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspectable.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspectable.go new file mode 100644 index 00000000000..a2a965a3431 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/introspect/introspectable.go @@ -0,0 +1,74 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "reflect" +) + +// Introspectable implements org.freedesktop.Introspectable. +// +// You can create it by converting the XML-formatted introspection data from a +// string to an Introspectable or call NewIntrospectable with a Node. Then, +// export it as org.freedesktop.Introspectable on you object. +type Introspectable string + +// NewIntrospectable returns an Introspectable that returns the introspection +// data that corresponds to the given Node. If n.Interfaces doesn't contain the +// data for org.freedesktop.DBus.Introspectable, it is added automatically. +func NewIntrospectable(n *Node) Introspectable { + found := false + for _, v := range n.Interfaces { + if v.Name == "org.freedesktop.DBus.Introspectable" { + found = true + break + } + } + if !found { + n.Interfaces = append(n.Interfaces, IntrospectData) + } + b, err := xml.Marshal(n) + if err != nil { + panic(err) + } + return Introspectable(b) +} + +// Introspect implements org.freedesktop.Introspectable.Introspect. +func (i Introspectable) Introspect() (string, *dbus.Error) { + return string(i), nil +} + +// Methods returns the description of the methods of v. This can be used to +// create a Node which can be passed to NewIntrospectable. +func Methods(v interface{}) []Method { + t := reflect.TypeOf(v) + ms := make([]Method, 0, t.NumMethod()) + for i := 0; i < t.NumMethod(); i++ { + if t.Method(i).PkgPath != "" { + continue + } + mt := t.Method(i).Type + if mt.NumOut() == 0 || + mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{"", nil}) { + + continue + } + var m Method + m.Name = t.Method(i).Name + m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2) + for j := 1; j < mt.NumIn(); j++ { + if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() { + arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"} + m.Args = append(m.Args, arg) + } + } + for j := 0; j < mt.NumOut()-1; j++ { + arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"} + m.Args = append(m.Args, arg) + } + m.Annotations = make([]Annotation, 0) + ms = append(ms, m) + } + return ms +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/message.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/message.go new file mode 100644 index 00000000000..075d6e38bae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/message.go @@ -0,0 +1,346 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" + "strconv" +) + +const protoVersion byte = 1 + +// Flags represents the possible flags of a D-Bus message. +type Flags byte + +const ( + // FlagNoReplyExpected signals that the message is not expected to generate + // a reply. If this flag is set on outgoing messages, any possible reply + // will be discarded. 
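To round out the introspect package above, a hedged sketch of exporting an object together with generated introspection data; the counter service and its names are invented, mirroring the Double method used in the vendored conn_test.go:

package main

import (
    "github.com/godbus/dbus"
    "github.com/godbus/dbus/introspect"
)

// counter is a placeholder service type.
type counter struct{}

func (counter) Double(i int64) (int64, *dbus.Error) {
    return 2 * i, nil
}

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    if err := conn.Export(counter{}, "/org/example/Counter", "org.example.Counter"); err != nil {
        panic(err)
    }

    // Describe the object: its own interface, with Methods derived by
    // reflection, plus the standard Introspectable interface.
    node := &introspect.Node{
        Name: "/org/example/Counter",
        Interfaces: []introspect.Interface{
            introspect.IntrospectData,
            {Name: "org.example.Counter", Methods: introspect.Methods(counter{})},
        },
    }
    if err := conn.Export(introspect.NewIntrospectable(node), "/org/example/Counter",
        "org.freedesktop.DBus.Introspectable"); err != nil {
        panic(err)
    }
    select {} // keep serving
}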
+ FlagNoReplyExpected Flags = 1 << iota + // FlagNoAutoStart signals that the message bus should not automatically + // start an application when handling this message. + FlagNoAutoStart +) + +// Type represents the possible types of a D-Bus message. +type Type byte + +const ( + TypeMethodCall Type = 1 + iota + TypeMethodReply + TypeError + TypeSignal + typeMax +) + +func (t Type) String() string { + switch t { + case TypeMethodCall: + return "method call" + case TypeMethodReply: + return "reply" + case TypeError: + return "error" + case TypeSignal: + return "signal" + } + return "invalid" +} + +// HeaderField represents the possible byte codes for the headers +// of a D-Bus message. +type HeaderField byte + +const ( + FieldPath HeaderField = 1 + iota + FieldInterface + FieldMember + FieldErrorName + FieldReplySerial + FieldDestination + FieldSender + FieldSignature + FieldUnixFDs + fieldMax +) + +// An InvalidMessageError describes the reason why a D-Bus message is regarded as +// invalid. +type InvalidMessageError string + +func (e InvalidMessageError) Error() string { + return "dbus: invalid message: " + string(e) +} + +// fieldType are the types of the various header fields. +var fieldTypes = [fieldMax]reflect.Type{ + FieldPath: objectPathType, + FieldInterface: stringType, + FieldMember: stringType, + FieldErrorName: stringType, + FieldReplySerial: uint32Type, + FieldDestination: stringType, + FieldSender: stringType, + FieldSignature: signatureType, + FieldUnixFDs: uint32Type, +} + +// requiredFields lists the header fields that are required by the different +// message types. +var requiredFields = [typeMax][]HeaderField{ + TypeMethodCall: {FieldPath, FieldMember}, + TypeMethodReply: {FieldReplySerial}, + TypeError: {FieldErrorName, FieldReplySerial}, + TypeSignal: {FieldPath, FieldInterface, FieldMember}, +} + +// Message represents a single D-Bus message. +type Message struct { + Type + Flags + Headers map[HeaderField]Variant + Body []interface{} + + serial uint32 +} + +type header struct { + Field byte + Variant +} + +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. 
+func DecodeMessage(rd io.Reader) (msg *Message, err error) { + var order binary.ByteOrder + var hlength, length uint32 + var typ, flags, proto byte + var headers []header + + b := make([]byte, 1) + _, err = rd.Read(b) + if err != nil { + return + } + switch b[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + + dec := newDecoder(rd, order) + dec.pos = 1 + + msg = new(Message) + vs, err := dec.Decode(Signature{"yyyuu"}) + if err != nil { + return nil, err + } + if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { + return nil, err + } + msg.Type = Type(typ) + msg.Flags = Flags(flags) + + // get the header length separately because we need it later + b = make([]byte, 4) + _, err = io.ReadFull(rd, b) + if err != nil { + return nil, err + } + binary.Read(bytes.NewBuffer(b), order, &hlength) + if hlength+length+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec.pos = 12 + vs, err = dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + if err = Store(vs, &headers); err != nil { + return nil, err + } + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, int(length)) + if length != 0 { + _, err := io.ReadFull(rd, body) + if err != nil { + return nil, err + } + } + + if err = msg.IsValid(); err != nil { + return nil, err + } + sig, _ := msg.Headers[FieldSignature].value.(Signature) + if sig.str != "" { + buf := bytes.NewBuffer(body) + dec = newDecoder(buf, order) + vs, err := dec.Decode(sig) + if err != nil { + return nil, err + } + msg.Body = vs + } + + return +} + +// EncodeTo encodes and sends a message to the given writer. The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. +func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { + if err := msg.IsValid(); err != nil { + return err + } + var vs [7]interface{} + switch order { + case binary.LittleEndian: + vs[0] = byte('l') + case binary.BigEndian: + vs[0] = byte('B') + default: + return errors.New("dbus: invalid byte order") + } + body := new(bytes.Buffer) + enc := newEncoder(body, order) + if len(msg.Body) != 0 { + enc.Encode(msg.Body...) + } + vs[1] = msg.Type + vs[2] = msg.Flags + vs[3] = protoVersion + vs[4] = uint32(len(body.Bytes())) + vs[5] = msg.serial + headers := make([]header, 0, len(msg.Headers)) + for k, v := range msg.Headers { + headers = append(headers, header{byte(k), v}) + } + vs[6] = headers + var buf bytes.Buffer + enc = newEncoder(&buf, order) + enc.Encode(vs[:]...) + enc.align(8) + body.WriteTo(&buf) + if buf.Len() > 1<<27 { + return InvalidMessageError("message is too long") + } + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// IsValid checks whether msg is a valid message and returns an +// InvalidMessageError if it is not. 
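As a rough editorial check on the wire-format code above (EncodeTo and DecodeMessage), the sketch below round-trips a minimal method-call message through a buffer. The serial field is unexported, so it stays zero here; on a real connection it is assigned when the message is sent. Path and member names are placeholders.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    // FieldPath and FieldMember are the headers required for TypeMethodCall;
    // the body is left empty so no signature header is needed.
    msg := &dbus.Message{
        Type: dbus.TypeMethodCall,
        Headers: map[dbus.HeaderField]dbus.Variant{
            dbus.FieldPath:   dbus.MakeVariant(dbus.ObjectPath("/org/example/Obj")),
            dbus.FieldMember: dbus.MakeVariant("Ping"),
        },
    }

    var buf bytes.Buffer
    if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
        panic(err)
    }

    decoded, err := dbus.DecodeMessage(&buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(decoded.Type, decoded.Headers[dbus.FieldMember])
}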
+func (msg *Message) IsValid() error { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 { + return InvalidMessageError("invalid flags") + } + if msg.Type == 0 || msg.Type >= typeMax { + return InvalidMessageError("invalid message type") + } + for k, v := range msg.Headers { + if k == 0 || k >= fieldMax { + return InvalidMessageError("invalid header") + } + if reflect.TypeOf(v.value) != fieldTypes[k] { + return InvalidMessageError("invalid type of header field") + } + } + for _, v := range requiredFields[msg.Type] { + if _, ok := msg.Headers[v]; !ok { + return InvalidMessageError("missing required header") + } + } + if path, ok := msg.Headers[FieldPath]; ok { + if !path.value.(ObjectPath).IsValid() { + return InvalidMessageError("invalid path name") + } + } + if iface, ok := msg.Headers[FieldInterface]; ok { + if !isValidInterface(iface.value.(string)) { + return InvalidMessageError("invalid interface name") + } + } + if member, ok := msg.Headers[FieldMember]; ok { + if !isValidMember(member.value.(string)) { + return InvalidMessageError("invalid member name") + } + } + if errname, ok := msg.Headers[FieldErrorName]; ok { + if !isValidInterface(errname.value.(string)) { + return InvalidMessageError("invalid error name") + } + } + if len(msg.Body) != 0 { + if _, ok := msg.Headers[FieldSignature]; !ok { + return InvalidMessageError("missing signature") + } + } + return nil +} + +// Serial returns the message's serial number. The returned value is only valid +// for messages received by eavesdropping. +func (msg *Message) Serial() uint32 { + return msg.serial +} + +// String returns a string representation of a message similar to the format of +// dbus-monitor. +func (msg *Message) String() string { + if err := msg.IsValid(); err != nil { + return "" + } + s := msg.Type.String() + if v, ok := msg.Headers[FieldSender]; ok { + s += " from " + v.value.(string) + } + if v, ok := msg.Headers[FieldDestination]; ok { + s += " to " + v.value.(string) + } + s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) + if v, ok := msg.Headers[FieldReplySerial]; ok { + s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldUnixFDs]; ok { + s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldPath]; ok { + s += " path " + string(v.value.(ObjectPath)) + } + if v, ok := msg.Headers[FieldInterface]; ok { + s += " interface " + v.value.(string) + } + if v, ok := msg.Headers[FieldErrorName]; ok { + s += " error " + v.value.(string) + } + if v, ok := msg.Headers[FieldMember]; ok { + s += " member " + v.value.(string) + } + if len(msg.Body) != 0 { + s += "\n" + } + for i, v := range msg.Body { + s += " " + MakeVariant(v).String() + if i != len(msg.Body)-1 { + s += "\n" + } + } + return s +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/prop/prop.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/prop/prop.go new file mode 100644 index 00000000000..ed5bdf2243c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/prop/prop.go @@ -0,0 +1,264 @@ +// Package prop provides the Properties struct which can be used to implement +// org.freedesktop.DBus.Properties. 
+package prop + +import ( + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "sync" +) + +// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is +// emitted for a property. If it is EmitTrue, the signal is emitted. If it is +// EmitInvalidates, the signal is also emitted, but the new value of the property +// is not disclosed. +type EmitType byte + +const ( + EmitFalse EmitType = iota + EmitTrue + EmitInvalidates +) + +// ErrIfaceNotFound is the error returned to peers who try to access properties +// on interfaces that aren't found. +var ErrIfaceNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil} + +// ErrPropNotFound is the error returned to peers trying to access properties +// that aren't found. +var ErrPropNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil} + +// ErrReadOnly is the error returned to peers trying to set a read-only +// property. +var ErrReadOnly = &dbus.Error{"org.freedesktop.DBus.Properties.Error.ReadOnly", nil} + +// ErrInvalidArg is returned to peers if the type of the property that is being +// changed and the argument don't match. +var ErrInvalidArg = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InvalidArg", nil} + +// The introspection data for the org.freedesktop.DBus.Properties interface. +var IntrospectData = introspect.Interface{ + Name: "org.freedesktop.DBus.Properties", + Methods: []introspect.Method{ + { + Name: "Get", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "out"}, + }, + }, + { + Name: "GetAll", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"props", "a{sv}", "out"}, + }, + }, + { + Name: "Set", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "in"}, + }, + }, + }, + Signals: []introspect.Signal{ + { + Name: "PropertiesChanged", + Args: []introspect.Arg{ + {"interface", "s", "out"}, + {"changed_properties", "a{sv}", "out"}, + {"invalidates_properties", "as", "out"}, + }, + }, + }, +} + +// The introspection data for the org.freedesktop.DBus.Properties interface, as +// a string. +const IntrospectDataString = ` + + + + + + + + + + + + + + + + + + + + + +` + +// Prop represents a single property. It is used for creating a Properties +// value. +type Prop struct { + // Initial value. Must be a DBus-representable type. + Value interface{} + + // If true, the value can be modified by calls to Set. + Writable bool + + // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is + // emitted if this property changes. + Emit EmitType + + // If not nil, anytime this property is changed by Set, this function is + // called with an appropiate Change as its argument. If the returned error + // is not nil, it is sent back to the caller of Set and the property is not + // changed. + Callback func(*Change) *dbus.Error +} + +// Change represents a change of a property by a call to Set. +type Change struct { + Props *Properties + Iface string + Name string + Value interface{} +} + +// Properties is a set of values that can be made available to the message bus +// using the org.freedesktop.DBus.Properties interface. It is safe for +// concurrent use by multiple goroutines. +type Properties struct { + m map[string]map[string]*Prop + mut sync.RWMutex + conn *dbus.Conn + path dbus.ObjectPath +} + +// New returns a new Properties structure that manages the given properties. 
+// The key for the first-level map of props is the name of the interface; the +// second-level key is the name of the property. The returned structure will be +// exported as org.freedesktop.DBus.Properties on path. +func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties { + p := &Properties{m: props, conn: conn, path: path} + conn.Export(p, path, "org.freedesktop.DBus.Properties") + return p +} + +// Get implements org.freedesktop.DBus.Properties.Get. +func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return dbus.Variant{}, ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return dbus.Variant{}, ErrPropNotFound + } + return dbus.MakeVariant(prop.Value), nil +} + +// GetAll implements org.freedesktop.DBus.Properties.GetAll. +func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return nil, ErrIfaceNotFound + } + rm := make(map[string]dbus.Variant, len(m)) + for k, v := range m { + rm[k] = dbus.MakeVariant(v.Value) + } + return rm, nil +} + +// GetMust returns the value of the given property and panics if either the +// interface or the property name are invalid. +func (p *Properties) GetMust(iface, property string) interface{} { + p.mut.RLock() + defer p.mut.RUnlock() + return p.m[iface][property].Value +} + +// Introspection returns the introspection data that represents the properties +// of iface. +func (p *Properties) Introspection(iface string) []introspect.Property { + p.mut.RLock() + defer p.mut.RUnlock() + m := p.m[iface] + s := make([]introspect.Property, 0, len(m)) + for k, v := range m { + p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()} + if v.Writable { + p.Access = "readwrite" + } else { + p.Access = "read" + } + s = append(s, p) + } + return s +} + +// set sets the given property and emits PropertyChanged if appropiate. p.mut +// must already be locked. +func (p *Properties) set(iface, property string, v interface{}) { + prop := p.m[iface][property] + prop.Value = v + switch prop.Emit { + case EmitFalse: + // do nothing + case EmitInvalidates: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{}, []string{property}) + case EmitTrue: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)}, + []string{}) + default: + panic("invalid value for EmitType") + } +} + +// Set implements org.freedesktop.Properties.Set. +func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error { + p.mut.Lock() + defer p.mut.Unlock() + m, ok := p.m[iface] + if !ok { + return ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return ErrPropNotFound + } + if !prop.Writable { + return ErrReadOnly + } + if newv.Signature() != dbus.SignatureOf(prop.Value) { + return ErrInvalidArg + } + if prop.Callback != nil { + err := prop.Callback(&Change{p, iface, property, newv.Value()}) + if err != nil { + return err + } + } + p.set(iface, property, newv.Value()) + return nil +} + +// SetMust sets the value of the given property and panics if the interface or +// the property name are invalid. 
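Closing out the prop package, a hedged sketch of serving properties through prop.New; the interface, property, and path names are all placeholders:

package main

import (
    "fmt"

    "github.com/godbus/dbus"
    "github.com/godbus/dbus/prop"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }

    // Layout: interface name -> property name -> *Prop.
    props := map[string]map[string]*prop.Prop{
        "org.example.Thing": {
            "Volume": {
                Value:    uint32(50),
                Writable: true,
                Emit:     prop.EmitTrue,
                Callback: func(c *prop.Change) *dbus.Error {
                    // Reject out-of-range writes before they are applied.
                    if c.Value.(uint32) > 100 {
                        return prop.ErrInvalidArg
                    }
                    return nil
                },
            },
        },
    }

    p := prop.New(conn, "/org/example/Thing", props)

    // org.freedesktop.DBus.Properties calls are now answered from this table;
    // local code can use the *Must helpers directly.
    p.SetMust("org.example.Thing", "Volume", uint32(75))
    fmt.Println(p.GetMust("org.example.Thing", "Volume"))
    select {} // keep serving
}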
+func (p *Properties) SetMust(iface, property string, v interface{}) { + p.mut.Lock() + p.set(iface, property, v) + p.mut.Unlock() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/proto_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/proto_test.go new file mode 100644 index 00000000000..608a770d41f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/proto_test.go @@ -0,0 +1,369 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io/ioutil" + "math" + "reflect" + "testing" +) + +var protoTests = []struct { + vs []interface{} + bigEndian []byte + littleEndian []byte +}{ + { + []interface{}{int32(0)}, + []byte{0, 0, 0, 0}, + []byte{0, 0, 0, 0}, + }, + { + []interface{}{true, false}, + []byte{0, 0, 0, 1, 0, 0, 0, 0}, + []byte{1, 0, 0, 0, 0, 0, 0, 0}, + }, + { + []interface{}{byte(0), uint16(12), int16(32), uint32(43)}, + []byte{0, 0, 0, 12, 0, 32, 0, 0, 0, 0, 0, 43}, + []byte{0, 0, 12, 0, 32, 0, 0, 0, 43, 0, 0, 0}, + }, + { + []interface{}{int64(-1), uint64(1<<64 - 1)}, + bytes.Repeat([]byte{255}, 16), + bytes.Repeat([]byte{255}, 16), + }, + { + []interface{}{math.Inf(+1)}, + []byte{0x7f, 0xf0, 0, 0, 0, 0, 0, 0}, + []byte{0, 0, 0, 0, 0, 0, 0xf0, 0x7f}, + }, + { + []interface{}{"foo"}, + []byte{0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{Signature{"ai"}}, + []byte{2, 'a', 'i', 0}, + []byte{2, 'a', 'i', 0}, + }, + { + []interface{}{[]int16{42, 256}}, + []byte{0, 0, 0, 4, 0, 42, 1, 0}, + []byte{4, 0, 0, 0, 42, 0, 0, 1}, + }, + { + []interface{}{MakeVariant("foo")}, + []byte{1, 's', 0, 0, 0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{1, 's', 0, 0, 3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{MakeVariant(MakeVariant(Signature{"v"}))}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + }, + { + []interface{}{map[int32]bool{42: true}}, + []byte{0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1}, + []byte{8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1, 0, 0, 0}, + }, + { + []interface{}{map[string]Variant{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, + { + []interface{}{[]uint64{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, +} + +func TestProto(t *testing.T) { + for i, v := range protoTests { + buf := new(bytes.Buffer) + bigEnc := newEncoder(buf, binary.BigEndian) + bigEnc.Encode(v.vs...) + marshalled := buf.Bytes() + if bytes.Compare(marshalled, v.bigEndian) != 0 { + t.Errorf("test %d (marshal be): got '%v', but expected '%v'\n", i+1, marshalled, + v.bigEndian) + } + buf.Reset() + litEnc := newEncoder(buf, binary.LittleEndian) + litEnc.Encode(v.vs...) 
+ marshalled = buf.Bytes() + if bytes.Compare(marshalled, v.littleEndian) != 0 { + t.Errorf("test %d (marshal le): got '%v', but expected '%v'\n", i+1, marshalled, + v.littleEndian) + } + unmarshalled := reflect.MakeSlice(reflect.TypeOf(v.vs), + 0, 0) + for i := range v.vs { + unmarshalled = reflect.Append(unmarshalled, + reflect.New(reflect.TypeOf(v.vs[i]))) + } + bigDec := newDecoder(bytes.NewReader(v.bigEndian), binary.BigEndian) + vs, err := bigDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal be): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal be): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + litDec := newDecoder(bytes.NewReader(v.littleEndian), binary.LittleEndian) + vs, err = litDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal le): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal le): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + + } +} + +func TestProtoMap(t *testing.T) { + m := map[string]uint8{ + "foo": 23, + "bar": 2, + } + var n map[string]uint8 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(m) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"a{sy}"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &n); err != nil { + t.Fatal(err) + } + if len(n) != 2 || n["foo"] != 23 || n["bar"] != 2 { + t.Error("got", n) + } +} + +func TestProtoVariantStruct(t *testing.T) { + var variant Variant + v := MakeVariant(struct { + A int32 + B int16 + }{1, 2}) + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(v) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"v"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &variant); err != nil { + t.Fatal(err) + } + sl := variant.Value().([]interface{}) + v1, v2 := sl[0].(int32), sl[1].(int16) + if v1 != int32(1) { + t.Error("got", v1, "as first int") + } + if v2 != int16(2) { + t.Error("got", v2, "as second int") + } +} + +func TestProtoStructTag(t *testing.T) { + type Bar struct { + A int32 + B chan interface{} `dbus:"-"` + C int32 + } + var bar1, bar2 Bar + bar1.A = 234 + bar2.C = 345 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(bar1) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"(ii)"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &bar2); err != nil { + t.Fatal(err) + } + if bar1 != bar2 { + t.Error("struct tag test: got", bar2) + } +} + +func TestProtoStoreStruct(t *testing.T) { + var foo struct { + A int32 + B string + c chan interface{} + D interface{} `dbus:"-"` + } + src := []interface{}{[]interface{}{int32(42), "foo"}} + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestProtoStoreNestedStruct(t *testing.T) { + var foo struct { + A int32 + B struct { + C string + D float64 + } + } + src := []interface{}{ + []interface{}{ + int32(42), + []interface{}{ + "foo", + 3.14, + }, + }, + } + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestMessage(t *testing.T) { + buf := new(bytes.Buffer) + message := new(Message) + message.Type = TypeMethodCall + message.serial = 32 + message.Headers = map[HeaderField]Variant{ + FieldPath: MakeVariant(ObjectPath("/org/foo/bar")), + FieldMember: MakeVariant("baz"), + } + message.Body = make([]interface{}, 0) + err := message.EncodeTo(buf, 
binary.LittleEndian) + if err != nil { + t.Error(err) + } + _, err = DecodeMessage(buf) + if err != nil { + t.Error(err) + } +} + +func TestProtoStructInterfaces(t *testing.T) { + b := []byte{42} + vs, err := newDecoder(bytes.NewReader(b), binary.LittleEndian).Decode(Signature{"(y)"}) + if err != nil { + t.Fatal(err) + } + if vs[0].([]interface{})[0].(byte) != 42 { + t.Errorf("wrongs results (got %v)", vs) + } +} + +// ordinary org.freedesktop.DBus.Hello call +var smallMessage = &Message{ + Type: TypeMethodCall, + serial: 1, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.DBus"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: MakeVariant("org.freedesktop.DBus"), + FieldMember: MakeVariant("Hello"), + }, +} + +// org.freedesktop.Notifications.Notify +var bigMessage = &Message{ + Type: TypeMethodCall, + serial: 2, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.Notifications"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/Notifications")), + FieldInterface: MakeVariant("org.freedesktop.Notifications"), + FieldMember: MakeVariant("Notify"), + FieldSignature: MakeVariant(Signature{"susssasa{sv}i"}), + }, + Body: []interface{}{ + "app_name", + uint32(0), + "dialog-information", + "Notification", + "This is the body of a notification", + []string{"ok", "Ok"}, + map[string]Variant{ + "sound-name": MakeVariant("dialog-information"), + }, + int32(-1), + }, +} + +func BenchmarkDecodeMessageSmall(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = smallMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeMessageBig(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = bigMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageSmall(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = smallMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageBig(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = bigMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig.go new file mode 100644 index 00000000000..f45b53ce1b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig.go @@ -0,0 +1,257 @@ +package dbus + +import ( + "fmt" + "reflect" + "strings" +) + +var sigToType = map[byte]reflect.Type{ + 'y': byteType, + 'b': boolType, + 'n': int16Type, + 'q': uint16Type, + 'i': int32Type, + 'u': uint32Type, + 'x': int64Type, + 't': uint64Type, + 'd': float64Type, + 's': stringType, + 'g': signatureType, + 'o': objectPathType, + 'v': variantType, + 'h': unixFDIndexType, +} + +// Signature represents a correct type signature as specified by the D-Bus 
+// specification. The zero value represents the empty signature, "". +type Signature struct { + str string +} + +// SignatureOf returns the concatenation of all the signatures of the given +// values. It panics if one of them is not representable in D-Bus. +func SignatureOf(vs ...interface{}) Signature { + var s string + for _, v := range vs { + s += getSignature(reflect.TypeOf(v)) + } + return Signature{s} +} + +// SignatureOfType returns the signature of the given type. It panics if the +// type is not representable in D-Bus. +func SignatureOfType(t reflect.Type) Signature { + return Signature{getSignature(t)} +} + +// getSignature returns the signature of the given type and panics on unknown types. +func getSignature(t reflect.Type) string { + // handle simple types first + switch t.Kind() { + case reflect.Uint8: + return "y" + case reflect.Bool: + return "b" + case reflect.Int16: + return "n" + case reflect.Uint16: + return "q" + case reflect.Int32: + if t == unixFDType { + return "h" + } + return "i" + case reflect.Uint32: + if t == unixFDIndexType { + return "h" + } + return "u" + case reflect.Int64: + return "x" + case reflect.Uint64: + return "t" + case reflect.Float64: + return "d" + case reflect.Ptr: + return getSignature(t.Elem()) + case reflect.String: + if t == objectPathType { + return "o" + } + return "s" + case reflect.Struct: + if t == variantType { + return "v" + } else if t == signatureType { + return "g" + } + var s string + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + s += getSignature(t.Field(i).Type) + } + } + return "(" + s + ")" + case reflect.Array, reflect.Slice: + return "a" + getSignature(t.Elem()) + case reflect.Map: + if !isKeyType(t.Key()) { + panic(InvalidTypeError{t}) + } + return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + } + panic(InvalidTypeError{t}) +} + +// ParseSignature returns the signature represented by this string, or a +// SignatureError if the string is not a valid signature. +func ParseSignature(s string) (sig Signature, err error) { + if len(s) == 0 { + return + } + if len(s) > 255 { + return Signature{""}, SignatureError{s, "too long"} + } + sig.str = s + for err == nil && len(s) != 0 { + err, s = validSingle(s, 0) + } + if err != nil { + sig = Signature{""} + } + + return +} + +// ParseSignatureMust behaves like ParseSignature, except that it panics if s +// is not valid. +func ParseSignatureMust(s string) Signature { + sig, err := ParseSignature(s) + if err != nil { + panic(err) + } + return sig +} + +// Empty retruns whether the signature is the empty signature. +func (s Signature) Empty() bool { + return s.str == "" +} + +// Single returns whether the signature represents a single, complete type. +func (s Signature) Single() bool { + err, r := validSingle(s.str, 0) + return err != nil && r == "" +} + +// String returns the signature's string representation. +func (s Signature) String() string { + return s.str +} + +// A SignatureError indicates that a signature passed to a function or received +// on a connection is not a valid signature. +type SignatureError struct { + Sig string + Reason string +} + +func (e SignatureError) Error() string { + return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) +} + +// Try to read a single type from this string. If it was successfull, err is nil +// and rem is the remaining unparsed part. Otherwise, err is a non-nil +// SignatureError and rem is "". 
depth is the current recursion depth which may +// not be greater than 64 and should be given as 0 on the first call. +func validSingle(s string, depth int) (err error, rem string) { + if s == "" { + return SignatureError{Sig: s, Reason: "empty signature"}, "" + } + if depth > 64 { + return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" + } + switch s[0] { + case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h': + return nil, s[1:] + case 'a': + if len(s) > 1 && s[1] == '{' { + i := findMatching(s[1:], '{', '}') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched '{'"}, "" + } + i++ + rem = s[i+1:] + s = s[2:i] + if err, _ = validSingle(s[:1], depth+1); err != nil { + return err, "" + } + err, nr := validSingle(s[1:], depth+1) + if err != nil { + return err, "" + } + if nr != "" { + return SignatureError{Sig: s, Reason: "too many types in dict"}, "" + } + return nil, rem + } + return validSingle(s[1:], depth+1) + case '(': + i := findMatching(s, '(', ')') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched ')'"}, "" + } + rem = s[i+1:] + s = s[1:i] + for err == nil && s != "" { + err, s = validSingle(s, depth+1) + } + if err != nil { + rem = "" + } + return + } + return SignatureError{Sig: s, Reason: "invalid type character"}, "" +} + +func findMatching(s string, left, right rune) int { + n := 0 + for i, v := range s { + if v == left { + n++ + } else if v == right { + n-- + } + if n == 0 { + return i + } + } + return -1 +} + +// typeFor returns the type of the given signature. It ignores any left over +// characters and panics if s doesn't start with a valid type signature. +func typeFor(s string) (t reflect.Type) { + err, _ := validSingle(s, 0) + if err != nil { + panic(err) + } + + if t, ok := sigToType[s[0]]; ok { + return t + } + switch s[0] { + case 'a': + if s[1] == '{' { + i := strings.LastIndex(s, "}") + t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) + } else { + t = reflect.SliceOf(typeFor(s[1:])) + } + case '(': + t = interfacesType + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig_test.go new file mode 100644 index 00000000000..da37bc968e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/sig_test.go @@ -0,0 +1,70 @@ +package dbus + +import ( + "testing" +) + +var sigTests = []struct { + vs []interface{} + sig Signature +}{ + { + []interface{}{new(int32)}, + Signature{"i"}, + }, + { + []interface{}{new(string)}, + Signature{"s"}, + }, + { + []interface{}{new(Signature)}, + Signature{"g"}, + }, + { + []interface{}{new([]int16)}, + Signature{"an"}, + }, + { + []interface{}{new(int16), new(uint32)}, + Signature{"nu"}, + }, + { + []interface{}{new(map[byte]Variant)}, + Signature{"a{yv}"}, + }, + { + []interface{}{new(Variant), new([]map[int32]string)}, + Signature{"vaa{is}"}, + }, +} + +func TestSig(t *testing.T) { + for i, v := range sigTests { + sig := SignatureOf(v.vs...) 
+ if sig != v.sig { + t.Errorf("test %d: got %q, expected %q", i+1, sig.str, v.sig.str) + } + } +} + +var getSigTest = []interface{}{ + []struct { + b byte + i int32 + t uint64 + s string + }{}, + map[string]Variant{}, +} + +func BenchmarkGetSignatureSimple(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf("", int32(0)) + } +} + +func BenchmarkGetSignatureLong(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf(getSigTest...) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_darwin.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_darwin.go new file mode 100644 index 00000000000..1bba0d6bf78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_darwin.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_generic.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_generic.go new file mode 100644 index 00000000000..46f8f49d699 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_generic.go @@ -0,0 +1,35 @@ +package dbus + +import ( + "encoding/binary" + "errors" + "io" +) + +type genericTransport struct { + io.ReadWriteCloser +} + +func (t genericTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} + +func (t genericTransport) SupportsUnixFDs() bool { + return false +} + +func (t genericTransport) EnableUnixFDs() {} + +func (t genericTransport) ReadMessage() (*Message, error) { + return DecodeMessage(t) +} + +func (t genericTransport) SendMessage(msg *Message) error { + for _, v := range msg.Body { + if _, ok := v.(UnixFD); ok { + return errors.New("dbus: unix fd passing not enabled") + } + } + return msg.EncodeTo(t, binary.LittleEndian) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix.go new file mode 100644 index 00000000000..d16229be400 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix.go @@ -0,0 +1,190 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "net" + "syscall" +) + +type oobReader struct { + conn *net.UnixConn + oob []byte + buf [4096]byte +} + +func (o *oobReader) Read(b []byte) (n int, err error) { + n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) + if err != nil { + return n, err + } + if flags&syscall.MSG_CTRUNC != 0 { + return n, errors.New("dbus: control data truncated (too many fds received)") + } + o.oob = append(o.oob, o.buf[:oobn]...) 
+ return n, nil +} + +type unixTransport struct { + *net.UnixConn + hasUnixFDs bool +} + +func newUnixTransport(keys string) (transport, error) { + var err error + + t := new(unixTransport) + abstract := getKey(keys, "abstract") + path := getKey(keys, "path") + switch { + case abstract == "" && path == "": + return nil, errors.New("dbus: invalid address (neither path nor abstract set)") + case abstract != "" && path == "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + case abstract == "" && path != "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + default: + return nil, errors.New("dbus: invalid address (both path and abstract set)") + } +} + +func (t *unixTransport) EnableUnixFDs() { + t.hasUnixFDs = true +} + +func (t *unixTransport) ReadMessage() (*Message, error) { + var ( + blen, hlen uint32 + csheader [16]byte + headers []header + order binary.ByteOrder + unixfds uint32 + ) + // To be sure that all bytes of out-of-band data are read, we use a special + // reader that uses ReadUnix on the underlying connection instead of Read + // and gathers the out-of-band data in a buffer. + rd := &oobReader{conn: t.UnixConn} + // read the first 16 bytes (the part of the header that has a constant size), + // from which we can figure out the length of the rest of the message + if _, err := io.ReadFull(rd, csheader[:]); err != nil { + return nil, err + } + switch csheader[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + // csheader[4:8] -> length of message body, csheader[12:16] -> length of + // header fields (without alignment) + binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) + binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + if hlen%8 != 0 { + hlen += 8 - (hlen % 8) + } + + // decode headers and look for unix fds + headerdata := make([]byte, hlen+4) + copy(headerdata, csheader[12:]) + if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + return nil, err + } + dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec.pos = 12 + vs, err := dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + Store(vs, &headers) + for _, v := range headers { + if v.Field == byte(FieldUnixFDs) { + unixfds, _ = v.Variant.value.(uint32) + } + } + all := make([]byte, 16+hlen+blen) + copy(all, csheader[:]) + copy(all[16:], headerdata[4:]) + if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + return nil, err + } + if unixfds != 0 { + if !t.hasUnixFDs { + return nil, errors.New("dbus: got unix fds on unsupported transport") + } + // read the fds from the OOB data + scms, err := syscall.ParseSocketControlMessage(rd.oob) + if err != nil { + return nil, err + } + if len(scms) != 1 { + return nil, errors.New("dbus: received more than one socket control message") + } + fds, err := syscall.ParseUnixRights(&scms[0]) + if err != nil { + return nil, err + } + msg, err := DecodeMessage(bytes.NewBuffer(all)) + if err != nil { + return nil, err + } + // substitute the values in the message body (which are indices for the + // array receiver via OOB) with the actual values + for i, v := range msg.Body { + if j, ok := v.(UnixFDIndex); ok { + if uint32(j) >= unixfds { + return nil, InvalidMessageError("invalid index for unix fd") + } + msg.Body[i] = UnixFD(fds[j]) + } + 
} + return msg, nil + } + return DecodeMessage(bytes.NewBuffer(all)) +} + +func (t *unixTransport) SendMessage(msg *Message) error { + fds := make([]int, 0) + for i, v := range msg.Body { + if fd, ok := v.(UnixFD); ok { + msg.Body[i] = UnixFDIndex(len(fds)) + fds = append(fds, int(fd)) + } + } + if len(fds) != 0 { + if !t.hasUnixFDs { + return errors.New("dbus: unix fd passing not enabled") + } + msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) + oob := syscall.UnixRights(fds...) + buf := new(bytes.Buffer) + msg.EncodeTo(buf, binary.LittleEndian) + n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) + if err != nil { + return err + } + if n != buf.Len() || oobn != len(oob) { + return io.ErrShortWrite + } + } else { + if err := msg.EncodeTo(t, binary.LittleEndian); err != nil { + return nil + } + } + return nil +} + +func (t *unixTransport) SupportsUnixFDs() bool { + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix_test.go new file mode 100644 index 00000000000..302233fc65e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unix_test.go @@ -0,0 +1,49 @@ +package dbus + +import ( + "os" + "testing" +) + +const testString = `This is a test! +This text should be read from the file that is created by this test.` + +type unixFDTest struct{} + +func (t unixFDTest) Test(fd UnixFD) (string, *Error) { + var b [4096]byte + file := os.NewFile(uintptr(fd), "testfile") + defer file.Close() + n, err := file.Read(b[:]) + if err != nil { + return "", &Error{"com.github.guelfey.test.Error", nil} + } + return string(b[:n]), nil +} + +func TestUnixFDs(t *testing.T) { + conn, err := SessionBus() + if err != nil { + t.Fatal(err) + } + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer w.Close() + if _, err := w.Write([]byte(testString)); err != nil { + t.Fatal(err) + } + name := conn.Names()[0] + test := unixFDTest{} + conn.Export(test, "/com/github/guelfey/test", "com.github.guelfey.test") + var s string + obj := conn.Object(name, "/com/github/guelfey/test") + err = obj.Call("com.github.guelfey.test.Test", 0, UnixFD(r.Fd())).Store(&s) + if err != nil { + t.Fatal(err) + } + if s != testString { + t.Fatal("got", s, "wanted", testString) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unixcred.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unixcred.go new file mode 100644 index 00000000000..42a0e769ef0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/transport_unixcred.go @@ -0,0 +1,22 @@ +// +build !darwin + +package dbus + +import ( + "io" + "os" + "syscall" +) + +func (t *unixTransport) SendNullByte() error { + ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := syscall.UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant.go new file mode 100644 index 
00000000000..b1b53ceb472 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant.go @@ -0,0 +1,129 @@ +package dbus + +import ( + "bytes" + "fmt" + "reflect" + "strconv" +) + +// Variant represents the D-Bus variant type. +type Variant struct { + sig Signature + value interface{} +} + +// MakeVariant converts the given value to a Variant. It panics if v cannot be +// represented as a D-Bus type. +func MakeVariant(v interface{}) Variant { + return Variant{SignatureOf(v), v} +} + +// ParseVariant parses the given string as a variant as described at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not +// empty, it is taken to be the expected signature for the variant. +func ParseVariant(s string, sig Signature) (Variant, error) { + tokens := varLex(s) + p := &varParser{tokens: tokens} + n, err := varMakeNode(p) + if err != nil { + return Variant{}, err + } + if sig.str == "" { + sig, err = varInfer(n) + if err != nil { + return Variant{}, err + } + } + v, err := n.Value(sig) + if err != nil { + return Variant{}, err + } + return MakeVariant(v), nil +} + +// format returns a formatted version of v and whether this string can be parsed +// unambigously. +func (v Variant) format() (string, bool) { + switch v.sig.str[0] { + case 'b', 'i': + return fmt.Sprint(v.value), true + case 'n', 'q', 'u', 'x', 't', 'd', 'h': + return fmt.Sprint(v.value), false + case 's': + return strconv.Quote(v.value.(string)), true + case 'o': + return strconv.Quote(string(v.value.(ObjectPath))), false + case 'g': + return strconv.Quote(v.value.(Signature).str), false + case 'v': + s, unamb := v.value.(Variant).format() + if !unamb { + return "<@" + v.value.(Variant).sig.str + " " + s + ">", true + } + return "<" + s + ">", true + case 'y': + return fmt.Sprintf("%#x", v.value.(byte)), false + } + rv := reflect.ValueOf(v.value) + switch rv.Kind() { + case reflect.Slice: + if rv.Len() == 0 { + return "[]", false + } + unamb := true + buf := bytes.NewBuffer([]byte("[")) + for i := 0; i < rv.Len(); i++ { + // TODO: slooow + s, b := MakeVariant(rv.Index(i).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String(), unamb + case reflect.Map: + if rv.Len() == 0 { + return "{}", false + } + unamb := true + buf := bytes.NewBuffer([]byte("{")) + for i, k := range rv.MapKeys() { + s, b := MakeVariant(k.Interface()).format() + unamb = unamb && b + buf.WriteString(s) + buf.WriteString(": ") + s, b = MakeVariant(rv.MapIndex(k).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte('}') + return buf.String(), unamb + } + return `"INVALID"`, true +} + +// Signature returns the D-Bus signature of the underlying value of v. +func (v Variant) Signature() Signature { + return v.sig +} + +// String returns the string representation of the underlying value of v as +// described at https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (v Variant) String() string { + s, unamb := v.format() + if !unamb { + return "@" + v.sig.str + " " + s + } + return s +} + +// Value returns the underlying value of v. 
+func (v Variant) Value() interface{} { + return v.value +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_lexer.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_lexer.go new file mode 100644 index 00000000000..332007d6f12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_lexer.go @@ -0,0 +1,284 @@ +package dbus + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// Heavily inspired by the lexer from text/template. + +type varToken struct { + typ varTokenType + val string +} + +type varTokenType byte + +const ( + tokEOF varTokenType = iota + tokError + tokNumber + tokString + tokBool + tokArrayStart + tokArrayEnd + tokDictStart + tokDictEnd + tokVariantStart + tokVariantEnd + tokComma + tokColon + tokType + tokByteString +) + +type varLexer struct { + input string + start int + pos int + width int + tokens []varToken +} + +type lexState func(*varLexer) lexState + +func varLex(s string) []varToken { + l := &varLexer{input: s} + l.run() + return l.tokens +} + +func (l *varLexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *varLexer) backup() { + l.pos -= l.width +} + +func (l *varLexer) emit(t varTokenType) { + l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) + l.start = l.pos +} + +func (l *varLexer) errorf(format string, v ...interface{}) lexState { + l.tokens = append(l.tokens, varToken{ + tokError, + fmt.Sprintf(format, v...), + }) + return nil +} + +func (l *varLexer) ignore() { + l.start = l.pos +} + +func (l *varLexer) next() rune { + var r rune + + if l.pos >= len(l.input) { + l.width = 0 + return -1 + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *varLexer) run() { + for state := varLexNormal; state != nil; { + state = state(l) + } +} + +func (l *varLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func varLexNormal(l *varLexer) lexState { + for { + r := l.next() + switch { + case r == -1: + l.emit(tokEOF) + return nil + case r == '[': + l.emit(tokArrayStart) + case r == ']': + l.emit(tokArrayEnd) + case r == '{': + l.emit(tokDictStart) + case r == '}': + l.emit(tokDictEnd) + case r == '<': + l.emit(tokVariantStart) + case r == '>': + l.emit(tokVariantEnd) + case r == ':': + l.emit(tokColon) + case r == ',': + l.emit(tokComma) + case r == '\'' || r == '"': + l.backup() + return varLexString + case r == '@': + l.backup() + return varLexType + case unicode.IsSpace(r): + l.ignore() + case unicode.IsNumber(r) || r == '+' || r == '-': + l.backup() + return varLexNumber + case r == 'b': + pos := l.start + if n := l.peek(); n == '"' || n == '\'' { + return varLexByteString + } + // not a byte string; try to parse it as a type or bool below + l.pos = pos + 1 + l.width = 1 + fallthrough + default: + // either a bool or a type. Try bools first. + l.backup() + if l.pos+4 <= len(l.input) { + if l.input[l.pos:l.pos+4] == "true" { + l.pos += 4 + l.emit(tokBool) + continue + } + } + if l.pos+5 <= len(l.input) { + if l.input[l.pos:l.pos+5] == "false" { + l.pos += 5 + l.emit(tokBool) + continue + } + } + // must be a type. 
+ return varLexType + } + } +} + +var varTypeMap = map[string]string{ + "boolean": "b", + "byte": "y", + "int16": "n", + "uint16": "q", + "int32": "i", + "uint32": "u", + "int64": "x", + "uint64": "t", + "double": "f", + "string": "s", + "objectpath": "o", + "signature": "g", +} + +func varLexByteString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated bytestring") + case q: + break Loop + } + } + l.emit(tokByteString) + return varLexNormal +} + +func varLexNumber(l *varLexer) lexState { + l.accept("+-") + digits := "0123456789" + if l.accept("0") { + if l.accept("x") { + digits = "0123456789abcdefABCDEF" + } else { + digits = "01234567" + } + } + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + if l.accept(".") { + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + } + if l.accept("eE") { + l.accept("+-") + for strings.IndexRune("0123456789", l.next()) >= 0 { + } + l.backup() + } + if r := l.peek(); unicode.IsLetter(r) { + l.next() + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(tokNumber) + return varLexNormal +} + +func varLexString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated string") + case q: + break Loop + } + } + l.emit(tokString) + return varLexNormal +} + +func varLexType(l *varLexer) lexState { + at := l.accept("@") + for { + r := l.next() + if r == -1 { + break + } + if unicode.IsSpace(r) { + l.backup() + break + } + } + if at { + if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { + return l.errorf("%s", err) + } + } else { + if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { + l.emit(tokType) + return varLexNormal + } + return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) + } + l.emit(tokType) + return varLexNormal +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_parser.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_parser.go new file mode 100644 index 00000000000..d20f5da6dd2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_parser.go @@ -0,0 +1,817 @@ +package dbus + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type varParser struct { + tokens []varToken + i int +} + +func (p *varParser) backup() { + p.i-- +} + +func (p *varParser) next() varToken { + if p.i < len(p.tokens) { + t := p.tokens[p.i] + p.i++ + return t + } + return varToken{typ: tokEOF} +} + +type varNode interface { + Infer() (Signature, error) + String() string + Sigs() sigSet + Value(Signature) (interface{}, error) +} + +func varMakeNode(p *varParser) (varNode, error) { + var sig Signature + + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokNumber: + return varMakeNumNode(t, sig) + case tokString: + return varMakeStringNode(t, sig) + case tokBool: + if sig.str != "" && sig.str != "b" { + return nil, varTypeError{t.val, sig} + } + b, err := strconv.ParseBool(t.val) + if err != nil { + return nil, err + } + return boolNode(b), nil + case tokArrayStart: + return varMakeArrayNode(p, sig) + case tokVariantStart: + 
return varMakeVariantNode(p, sig) + case tokDictStart: + return varMakeDictNode(p, sig) + case tokType: + if sig.str != "" { + return nil, errors.New("unexpected type annotation") + } + if t.val[0] == '@' { + sig.str = t.val[1:] + } else { + sig.str = varTypeMap[t.val] + } + case tokByteString: + if sig.str != "" && sig.str != "ay" { + return nil, varTypeError{t.val, sig} + } + b, err := varParseByteString(t.val) + if err != nil { + return nil, err + } + return byteStringNode(b), nil + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } +} + +type varTypeError struct { + val string + sig Signature +} + +func (e varTypeError) Error() string { + return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) +} + +type sigSet map[Signature]bool + +func (s sigSet) Empty() bool { + return len(s) == 0 +} + +func (s sigSet) Intersect(s2 sigSet) sigSet { + r := make(sigSet) + for k := range s { + if s2[k] { + r[k] = true + } + } + return r +} + +func (s sigSet) Single() (Signature, bool) { + if len(s) == 1 { + for k := range s { + return k, true + } + } + return Signature{}, false +} + +func (s sigSet) ToArray() sigSet { + r := make(sigSet, len(s)) + for k := range s { + r[Signature{"a" + k.str}] = true + } + return r +} + +type numNode struct { + sig Signature + str string + val interface{} +} + +var numSigSet = sigSet{ + Signature{"y"}: true, + Signature{"n"}: true, + Signature{"q"}: true, + Signature{"i"}: true, + Signature{"u"}: true, + Signature{"x"}: true, + Signature{"t"}: true, + Signature{"d"}: true, +} + +func (n numNode) Infer() (Signature, error) { + if strings.ContainsAny(n.str, ".e") { + return Signature{"d"}, nil + } + return Signature{"i"}, nil +} + +func (n numNode) String() string { + return n.str +} + +func (n numNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + if strings.ContainsAny(n.str, ".e") { + return sigSet{Signature{"d"}: true} + } + return numSigSet +} + +func (n numNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + return varNumAs(n.str, sig) +} + +func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { + if sig.str == "" { + return numNode{str: tok.val}, nil + } + num, err := varNumAs(tok.val, sig) + if err != nil { + return nil, err + } + return numNode{sig: sig, val: num}, nil +} + +func varNumAs(s string, sig Signature) (interface{}, error) { + isUnsigned := false + size := 32 + switch sig.str { + case "n": + size = 16 + case "i": + case "x": + size = 64 + case "y": + size = 8 + isUnsigned = true + case "q": + size = 16 + isUnsigned = true + case "u": + isUnsigned = true + case "t": + size = 64 + isUnsigned = true + case "d": + d, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + return d, nil + default: + return nil, varTypeError{s, sig} + } + base := 10 + if strings.HasPrefix(s, "0x") { + base = 16 + s = s[2:] + } + if strings.HasPrefix(s, "0") && len(s) != 1 { + base = 8 + s = s[1:] + } + if isUnsigned { + i, err := strconv.ParseUint(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "y": + v = byte(i) + case "q": + v = uint16(i) + case "u": + v = uint32(i) + } + return v, nil + } + i, err := strconv.ParseInt(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "n": + v = int16(i) + case "i": + v = int32(i) + } + return v, nil +} + +type 
stringNode struct { + sig Signature + str string // parsed + val interface{} // has correct type +} + +var stringSigSet = sigSet{ + Signature{"s"}: true, + Signature{"g"}: true, + Signature{"o"}: true, +} + +func (n stringNode) Infer() (Signature, error) { + return Signature{"s"}, nil +} + +func (n stringNode) String() string { + return n.str +} + +func (n stringNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + return stringSigSet +} + +func (n stringNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + switch { + case sig.str == "g": + return Signature{n.str}, nil + case sig.str == "o": + return ObjectPath(n.str), nil + case sig.str == "s": + return n.str, nil + default: + return nil, varTypeError{n.str, sig} + } +} + +func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { + if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { + return nil, fmt.Errorf("invalid type %q for string", sig.str) + } + s, err := varParseString(tok.val) + if err != nil { + return nil, err + } + n := stringNode{str: s} + if sig.str == "" { + return stringNode{str: s}, nil + } + n.sig = sig + switch sig.str { + case "o": + n.val = ObjectPath(s) + case "g": + n.val = Signature{s} + case "s": + n.val = s + } + return n, nil +} + +func varParseString(s string) (string, error) { + // quotes are guaranteed to be there + s = s[1 : len(s)-1] + buf := new(bytes.Buffer) + for len(s) != 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + if r != '\\' { + buf.WriteRune(r) + continue + } + r, size = utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + switch r { + case 'a': + buf.WriteRune(0x7) + case 'b': + buf.WriteRune(0x8) + case 'f': + buf.WriteRune(0xc) + case 'n': + buf.WriteRune('\n') + case 'r': + buf.WriteRune('\r') + case 't': + buf.WriteRune('\t') + case '\n': + case 'u': + if len(s) < 4 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:4], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[4:] + case 'U': + if len(s) < 8 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:8], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[8:] + default: + buf.WriteRune(r) + } + } + return buf.String(), nil +} + +var boolSigSet = sigSet{Signature{"b"}: true} + +type boolNode bool + +func (boolNode) Infer() (Signature, error) { + return Signature{"b"}, nil +} + +func (b boolNode) String() string { + if b { + return "true" + } + return "false" +} + +func (boolNode) Sigs() sigSet { + return boolSigSet +} + +func (b boolNode) Value(sig Signature) (interface{}, error) { + if sig.str != "b" { + return nil, varTypeError{b.String(), sig} + } + return bool(b), nil +} + +type arrayNode struct { + set sigSet + children []varNode + val interface{} +} + +func (n arrayNode) Infer() (Signature, error) { + for _, v := range n.children { + csig, err := varInfer(v) + if err != nil { + continue + } + return Signature{"a" + csig.str}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n arrayNode) String() string { + s := "[" + for i, v := range n.children { + s += v.String() + if i != len(n.children)-1 { + s += ", " + } + } + 
return s + "]" +} + +func (n arrayNode) Sigs() sigSet { + return n.set +} + +func (n arrayNode) Value(sig Signature) (interface{}, error) { + if n.set.Empty() { + // no type information whatsoever, so this must be an empty slice + return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil + } + if !n.set[sig] { + return nil, varTypeError{n.String(), sig} + } + s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) + for i, v := range n.children { + rv, err := v.Value(Signature{sig.str[1:]}) + if err != nil { + return nil, err + } + s.Index(i).Set(reflect.ValueOf(rv)) + } + return s.Interface(), nil +} + +func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { + var n arrayNode + if sig.str != "" { + n.set = sigSet{sig: true} + } + if t := p.next(); t.typ == tokArrayEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + cn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if cset := cn.Sigs(); !cset.Empty() { + if n.set.Empty() { + n.set = cset.ToArray() + } else { + nset := cset.ToArray().Intersect(n.set) + if nset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) + } + n.set = nset + } + } + n.children = append(n.children, cn) + switch t := p.next(); t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokArrayEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type variantNode struct { + n varNode +} + +var variantSet = sigSet{ + Signature{"v"}: true, +} + +func (variantNode) Infer() (Signature, error) { + return Signature{"v"}, nil +} + +func (n variantNode) String() string { + return "<" + n.n.String() + ">" +} + +func (variantNode) Sigs() sigSet { + return variantSet +} + +func (n variantNode) Value(sig Signature) (interface{}, error) { + if sig.str != "v" { + return nil, varTypeError{n.String(), sig} + } + sig, err := varInfer(n.n) + if err != nil { + return nil, err + } + v, err := n.n.Value(sig) + if err != nil { + return nil, err + } + return MakeVariant(v), nil +} + +func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { + n, err := varMakeNode(p) + if err != nil { + return nil, err + } + if t := p.next(); t.typ != tokVariantEnd { + return nil, fmt.Errorf("unexpected %q", t.val) + } + vn := variantNode{n} + if sig.str != "" && sig.str != "v" { + return nil, varTypeError{vn.String(), sig} + } + return variantNode{n}, nil +} + +type dictEntry struct { + key, val varNode +} + +type dictNode struct { + kset, vset sigSet + children []dictEntry + val interface{} +} + +func (n dictNode) Infer() (Signature, error) { + for _, v := range n.children { + ksig, err := varInfer(v.key) + if err != nil { + continue + } + vsig, err := varInfer(v.val) + if err != nil { + continue + } + return Signature{"a{" + ksig.str + vsig.str + "}"}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n dictNode) String() string { + s := "{" + for i, v := range n.children { + s += v.key.String() + ": " + v.val.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "}" +} + +func (n dictNode) Sigs() sigSet { + r := sigSet{} + for k := range n.kset { + for v := range n.vset { + sig := "a{" + k.str + v.str + "}" + r[Signature{sig}] = true + } + 
} + return r +} + +func (n dictNode) Value(sig Signature) (interface{}, error) { + set := n.Sigs() + if set.Empty() { + // no type information -> empty dict + return reflect.MakeMap(typeFor(sig.str)).Interface(), nil + } + if !set[sig] { + return nil, varTypeError{n.String(), sig} + } + m := reflect.MakeMap(typeFor(sig.str)) + ksig := Signature{sig.str[2:3]} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + for _, v := range n.children { + kv, err := v.key.Value(ksig) + if err != nil { + return nil, err + } + vv, err := v.val.Value(vsig) + if err != nil { + return nil, err + } + m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return m.Interface(), nil +} + +func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { + var n dictNode + + if sig.str != "" { + if len(sig.str) < 5 { + return nil, fmt.Errorf("invalid signature %q for dict type", sig) + } + ksig := Signature{string(sig.str[2])} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + n.kset = sigSet{ksig: true} + n.vset = sigSet{vsig: true} + } + if t := p.next(); t.typ == tokDictEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + kn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if kset := kn.Sigs(); !kset.Empty() { + if n.kset.Empty() { + n.kset = kset + } else { + n.kset = kset.Intersect(n.kset) + if n.kset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) + } + } + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokColon: + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + vn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if vset := vn.Sigs(); !vset.Empty() { + if n.vset.Empty() { + n.vset = vset + } else { + n.vset = n.vset.Intersect(vset) + if n.vset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) + } + } + } + n.children = append(n.children, dictEntry{kn, vn}) + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokDictEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type byteStringNode []byte + +var byteStringSet = sigSet{ + Signature{"ay"}: true, +} + +func (byteStringNode) Infer() (Signature, error) { + return Signature{"ay"}, nil +} + +func (b byteStringNode) String() string { + return string(b) +} + +func (b byteStringNode) Sigs() sigSet { + return byteStringSet +} + +func (b byteStringNode) Value(sig Signature) (interface{}, error) { + if sig.str != "ay" { + return nil, varTypeError{b.String(), sig} + } + return []byte(b), nil +} + +func varParseByteString(s string) ([]byte, error) { + // quotes and b at start are guaranteed to be there + b := make([]byte, 0, 1) + s = s[2 : len(s)-1] + for len(s) != 0 { + c := s[0] + s = s[1:] + if c != '\\' { + b = append(b, c) + continue + } + c = s[0] + s = s[1:] + switch c { + case 'a': + b = append(b, 0x7) + case 'b': + b = append(b, 0x8) + case 'f': + b = append(b, 0xc) + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = 
append(b, '\t') + case 'x': + if len(s) < 2 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:2], 16, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[2:] + case '0': + if len(s) < 3 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:3], 8, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[3:] + default: + b = append(b, c) + } + } + return append(b, 0), nil +} + +func varInfer(n varNode) (Signature, error) { + if sig, ok := n.Sigs().Single(); ok { + return sig, nil + } + return n.Infer() +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_test.go new file mode 100644 index 00000000000..da917c8e290 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/godbus/dbus/variant_test.go @@ -0,0 +1,78 @@ +package dbus + +import "reflect" +import "testing" + +var variantFormatTests = []struct { + v interface{} + s string +}{ + {int32(1), `1`}, + {"foo", `"foo"`}, + {ObjectPath("/org/foo"), `@o "/org/foo"`}, + {Signature{"i"}, `@g "i"`}, + {[]byte{}, `@ay []`}, + {[]int32{1, 2}, `[1, 2]`}, + {[]int64{1, 2}, `@ax [1, 2]`}, + {[][]int32{{3, 4}, {5, 6}}, `[[3, 4], [5, 6]]`}, + {[]Variant{MakeVariant(int32(1)), MakeVariant(1.0)}, `[<1>, <@d 1>]`}, + {map[string]int32{"one": 1, "two": 2}, `{"one": 1, "two": 2}`}, + {map[int32]ObjectPath{1: "/org/foo"}, `@a{io} {1: "/org/foo"}`}, + {map[string]Variant{}, `@a{sv} {}`}, +} + +func TestFormatVariant(t *testing.T) { + for i, v := range variantFormatTests { + if s := MakeVariant(v.v).String(); s != v.s { + t.Errorf("test %d: got %q, wanted %q", i+1, s, v.s) + } + } +} + +var variantParseTests = []struct { + s string + v interface{} +}{ + {"1", int32(1)}, + {"true", true}, + {"false", false}, + {"1.0", float64(1.0)}, + {"0x10", int32(16)}, + {"1e1", float64(10)}, + {`"foo"`, "foo"}, + {`"\a\b\f\n\r\t"`, "\x07\x08\x0c\n\r\t"}, + {`"\u00e4\U0001f603"`, "\u00e4\U0001f603"}, + {"[1]", []int32{1}}, + {"[1, 2, 3]", []int32{1, 2, 3}}, + {"@ai []", []int32{}}, + {"[1, 5.0]", []float64{1, 5.0}}, + {"[[1, 2], [3, 4.0]]", [][]float64{{1, 2}, {3, 4}}}, + {`[@o "/org/foo", "/org/bar"]`, []ObjectPath{"/org/foo", "/org/bar"}}, + {"<1>", MakeVariant(int32(1))}, + {"[<1>, <2.0>]", []Variant{MakeVariant(int32(1)), MakeVariant(2.0)}}, + {`[[], [""]]`, [][]string{{}, {""}}}, + {`@a{ss} {}`, map[string]string{}}, + {`{"foo": 1}`, map[string]int32{"foo": 1}}, + {`[{}, {"foo": "bar"}]`, []map[string]string{{}, {"foo": "bar"}}}, + {`{"a": <1>, "b": <"foo">}`, + map[string]Variant{"a": MakeVariant(int32(1)), "b": MakeVariant("foo")}}, + {`b''`, []byte{0}}, + {`b"abc"`, []byte{'a', 'b', 'c', 0}}, + {`b"\x01\0002\a\b\f\n\r\t"`, []byte{1, 2, 0x7, 0x8, 0xc, '\n', '\r', '\t', 0}}, + {`[[0], b""]`, [][]byte{{0}, {0}}}, + {"int16 0", int16(0)}, + {"byte 0", byte(0)}, +} + +func TestParseVariant(t *testing.T) { + for i, v := range variantParseTests { + nv, err := ParseVariant(v.s, Signature{}) + if err != nil { + t.Errorf("test %d: parsing failed: %s", i+1, err) + continue + } + if !reflect.DeepEqual(nv.value, v.v) { + t.Errorf("test %d: got %q, wanted %q", i+1, nv, v.v) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/LICENSE 
b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/LICENSE new file mode 100644 index 00000000000..80dd96de77f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability.go new file mode 100644 index 00000000000..9df3b4151b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability.go @@ -0,0 +1,71 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package capability provides utilities for manipulating POSIX capabilities. +package capability + +type Capabilities interface { + // Get check whether a capability present in the given + // capabilities set. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + Get(which CapType, what Cap) bool + + // Empty check whether all capability bits of the given capabilities + // set are zero. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + Empty(which CapType) bool + + // Full check whether all capability bits of the given capabilities + // set are one. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + Full(which CapType) bool + + // Set sets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + Set(which CapType, caps ...Cap) + + // Unset unsets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + Unset(which CapType, caps ...Cap) + + // Fill sets all bits of the given capabilities kind to one. 
The + // 'kind' value should be one or combination (OR'ed) of CAPS or + // BOUNDS. + Fill(kind CapType) + + // Clear sets all bits of the given capabilities kind to zero. The + // 'kind' value should be one or combination (OR'ed) of CAPS or + // BOUNDS. + Clear(kind CapType) + + // String return current capabilities state of the given capabilities + // set as string. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE or BOUNDING. + StringCap(which CapType) string + + // String return current capabilities state as string. + String() string + + // Load load actual capabilities value. This will overwrite all + // outstanding changes. + Load() error + + // Apply apply the capabilities settings, so all changes will take + // effect. + Apply(kind CapType) error +} + +// NewPid create new initialized Capabilities object for given pid. +func NewPid(pid int) (Capabilities, error) { + return newPid(pid) +} + +// NewFile create new initialized Capabilities object for given named file. +func NewFile(name string) (Capabilities, error) { + return newFile(name) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go new file mode 100644 index 00000000000..c5f335f7fb2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go @@ -0,0 +1,566 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package capability + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" +) + +var errUnknownVers = errors.New("unknown capability version") + +const ( + linuxCapVer1 = 0x19980330 + linuxCapVer2 = 0x20071026 + linuxCapVer3 = 0x20080522 +) + +var capVers uint32 + +func init() { + var hdr capHeader + capget(&hdr, nil) + capVers = hdr.version +} + +func mkStringCap(c Capabilities, which CapType) (ret string) { + for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ { + if !c.Get(which, i) { + continue + } + if first { + first = false + } else { + ret += ", " + } + ret += i.String() + } + return +} + +func mkString(c Capabilities, max CapType) (ret string) { + ret = "{" + for i := CapType(1); i <= max; i <<= 1 { + ret += " " + i.String() + "=\"" + if c.Empty(i) { + ret += "empty" + } else if c.Full(i) { + ret += "full" + } else { + ret += c.StringCap(i) + } + ret += "\"" + } + ret += " }" + return +} + +func newPid(pid int) (c Capabilities, err error) { + switch capVers { + case linuxCapVer1: + p := new(capsV1) + p.hdr.version = capVers + p.hdr.pid = pid + c = p + case linuxCapVer2, linuxCapVer3: + p := new(capsV3) + p.hdr.version = capVers + p.hdr.pid = pid + c = p + default: + err = errUnknownVers + return + } + err = c.Load() + if err != nil { + c = nil + } + return +} + +type capsV1 struct { + hdr capHeader + data capData +} + +func (c *capsV1) Get(which CapType, what Cap) bool { + if what > 32 { + return false + } + + switch which { + case EFFECTIVE: + return (1< 32 { + continue + } + + if which&EFFECTIVE != 0 { + c.data.effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.inheritable |= 1 << uint(what) + } + } +} + +func (c *capsV1) Unset(which CapType, 
caps ...Cap) { + for _, what := range caps { + if what > 32 { + continue + } + + if which&EFFECTIVE != 0 { + c.data.effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsV1) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective = 0x7fffffff + c.data.permitted = 0x7fffffff + c.data.inheritable = 0 + } +} + +func (c *capsV1) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective = 0 + c.data.permitted = 0 + c.data.inheritable = 0 + } +} + +func (c *capsV1) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV1) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV1) Load() (err error) { + return capget(&c.hdr, &c.data) +} + +func (c *capsV1) Apply(kind CapType) error { + if kind&CAPS == CAPS { + return capset(&c.hdr, &c.data) + } + return nil +} + +type capsV3 struct { + hdr capHeader + data [2]capData + bounds [2]uint32 +} + +func (c *capsV3) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1< 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable |= 1 << uint(what) + } + if which&BOUNDING != 0 { + c.bounds[i] |= 1 << uint(what) + } + } +} + +func (c *capsV3) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable &= ^(1 << uint(what)) + } + if which&BOUNDING != 0 { + c.bounds[i] &= ^(1 << uint(what)) + } + } +} + +func (c *capsV3) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0xffffffff + c.data[0].permitted = 0xffffffff + c.data[0].inheritable = 0 + c.data[1].effective = 0xffffffff + c.data[1].permitted = 0xffffffff + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0xffffffff + c.bounds[1] = 0xffffffff + } +} + +func (c *capsV3) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0 + c.data[0].permitted = 0 + c.data[0].inheritable = 0 + c.data[1].effective = 0 + c.data[1].permitted = 0 + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0 + c.bounds[1] = 0 + } +} + +func (c *capsV3) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV3) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV3) Load() (err error) { + err = capget(&c.hdr, &c.data[0]) + if err != nil { + return + } + + f, err := os.Open(fmt.Sprintf("/proc/%d/status", c.hdr.pid)) + if err != nil { + return + } + b := bufio.NewReader(f) + for { + line, e := b.ReadString('\n') + if e != nil { + if e != io.EOF { + err = e + } + break + } + if strings.HasPrefix(line, "CapB") { + fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) + break + } + } + f.Close() + + return +} + +func (c *capsV3) Apply(kind CapType) (err error) { + if kind&BOUNDS == BOUNDS { + var data [2]capData + err = capget(&c.hdr, &data[0]) + if err != nil { + return + } + if (1< 31 { + if 
c.data.version == 1 { + return false + } + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1< 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data.effective[i] |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable |= 1 << uint(what) + } + } +} + +func (c *capsFile) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data.effective[i] &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsFile) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0xffffffff + c.data.data[0].permitted = 0xffffffff + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0xffffffff + c.data.data[1].permitted = 0xffffffff + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0 + c.data.data[0].permitted = 0 + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0 + c.data.data[1].permitted = 0 + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsFile) String() (ret string) { + return mkString(c, INHERITABLE) +} + +func (c *capsFile) Load() (err error) { + return getVfsCap(c.path, &c.data) +} + +func (c *capsFile) Apply(kind CapType) (err error) { + if kind&CAPS == CAPS { + return setVfsCap(c.path, &c.data) + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_noop.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_noop.go new file mode 100644 index 00000000000..9bb3070c5ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_noop.go @@ -0,0 +1,19 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build !linux + +package capability + +import "errors" + +func newPid(pid int) (Capabilities, error) { + return nil, errors.New("not supported") +} + +func newFile(path string) (Capabilities, error) { + return nil, errors.New("not supported") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_test.go new file mode 100644 index 00000000000..8108655c05f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/capability_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
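For orientation, a short sketch of how the Capabilities interface introduced earlier in this patch is typically driven (this is not part of the vendored sources; the helper name and the choice of CAP_NET_BIND_SERVICE are purely illustrative):

```go
// allowLowPorts is an illustrative helper: load a process's capability
// state, raise one capability in two sets, and write the result back.
func allowLowPorts(pid int) error {
	caps, err := capability.NewPid(pid) // NewPid loads the current state
	if err != nil {
		return err
	}
	// Set only changes the in-memory copy; Apply(CAPS) writes it back.
	caps.Set(capability.EFFECTIVE|capability.PERMITTED, capability.CAP_NET_BIND_SERVICE)
	return caps.Apply(capability.CAPS)
}
```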
+ +package capability + +import "testing" + +func TestState(t *testing.T) { + testEmpty := func(name string, c Capabilities, whats CapType) { + for i := CapType(1); i <= BOUNDING; i <<= 1 { + if (i&whats) != 0 && !c.Empty(i) { + t.Errorf(name+": capabilities set %q wasn't empty", i) + } + } + } + testFull := func(name string, c Capabilities, whats CapType) { + for i := CapType(1); i <= BOUNDING; i <<= 1 { + if (i&whats) != 0 && !c.Full(i) { + t.Errorf(name+": capabilities set %q wasn't full", i) + } + } + } + testPartial := func(name string, c Capabilities, whats CapType) { + for i := CapType(1); i <= BOUNDING; i <<= 1 { + if (i&whats) != 0 && (c.Empty(i) || c.Full(i)) { + t.Errorf(name+": capabilities set %q wasn't partial", i) + } + } + } + testGet := func(name string, c Capabilities, whats CapType, max Cap) { + for i := CapType(1); i <= BOUNDING; i <<= 1 { + if (i & whats) == 0 { + continue + } + for j := Cap(0); j <= max; j++ { + if !c.Get(i, j) { + t.Errorf(name+": capability %q wasn't found on %q", j, i) + } + } + } + } + + capf := new(capsFile) + capf.data.version = 2 + for _, tc := range []struct { + name string + c Capabilities + sets CapType + max Cap + }{ + {"v1", new(capsV1), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL}, + {"v3", new(capsV3), EFFECTIVE | PERMITTED | BOUNDING, CAP_LAST_CAP}, + {"file_v1", new(capsFile), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL}, + {"file_v2", capf, EFFECTIVE | PERMITTED, CAP_LAST_CAP}, + } { + testEmpty(tc.name, tc.c, tc.sets) + tc.c.Fill(CAPS | BOUNDS) + testFull(tc.name, tc.c, tc.sets) + testGet(tc.name, tc.c, tc.sets, tc.max) + tc.c.Clear(CAPS | BOUNDS) + testEmpty(tc.name, tc.c, tc.sets) + for i := CapType(1); i <= BOUNDING; i <<= 1 { + for j := Cap(0); j <= CAP_LAST_CAP; j++ { + tc.c.Set(i, j) + } + } + testFull(tc.name, tc.c, tc.sets) + testGet(tc.name, tc.c, tc.sets, tc.max) + for i := CapType(1); i <= BOUNDING; i <<= 1 { + for j := Cap(0); j <= CAP_LAST_CAP; j++ { + tc.c.Unset(i, j) + } + } + testEmpty(tc.name, tc.c, tc.sets) + tc.c.Set(PERMITTED, CAP_CHOWN) + testPartial(tc.name, tc.c, PERMITTED) + tc.c.Clear(CAPS | BOUNDS) + testEmpty(tc.name, tc.c, tc.sets) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/enum.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/enum.go new file mode 100644 index 00000000000..e2900a4e933 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/enum.go @@ -0,0 +1,338 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package capability + +type CapType uint + +func (c CapType) String() string { + switch c { + case EFFECTIVE: + return "effective" + case PERMITTED: + return "permitted" + case INHERITABLE: + return "inheritable" + case BOUNDING: + return "bounding" + case CAPS: + return "caps" + } + return "unknown" +} + +const ( + EFFECTIVE CapType = 1 << iota + PERMITTED + INHERITABLE + BOUNDING + + CAPS = EFFECTIVE | PERMITTED | INHERITABLE + BOUNDS = BOUNDING +) + +type Cap int + +func (c Cap) String() string { + switch c { + case CAP_CHOWN: + return "chown" + case CAP_DAC_OVERRIDE: + return "dac_override" + case CAP_DAC_READ_SEARCH: + return "dac_read_search" + case CAP_FOWNER: + return "fowner" + case CAP_FSETID: + return "fsetid" + case CAP_KILL: + return "kill" + case CAP_SETGID: + return "setgid" + case CAP_SETUID: + return "setuid" + case CAP_SETPCAP: + return "setpcap" + case CAP_LINUX_IMMUTABLE: + return "linux_immutable" + case CAP_NET_BIND_SERVICE: + return "net_bind_service" + case CAP_NET_BROADCAST: + return "net_broadcast" + case CAP_NET_ADMIN: + return "net_admin" + case CAP_NET_RAW: + return "net_raw" + case CAP_IPC_LOCK: + return "ipc_lock" + case CAP_IPC_OWNER: + return "ipc_owner" + case CAP_SYS_MODULE: + return "sys_module" + case CAP_SYS_RAWIO: + return "sys_rawio" + case CAP_SYS_CHROOT: + return "sys_chroot" + case CAP_SYS_PTRACE: + return "sys_ptrace" + case CAP_SYS_PACCT: + return "sys_psacct" + case CAP_SYS_ADMIN: + return "sys_admin" + case CAP_SYS_BOOT: + return "sys_boot" + case CAP_SYS_NICE: + return "sys_nice" + case CAP_SYS_RESOURCE: + return "sys_resource" + case CAP_SYS_TIME: + return "sys_time" + case CAP_SYS_TTY_CONFIG: + return "sys_tty_config" + case CAP_MKNOD: + return "mknod" + case CAP_LEASE: + return "lease" + case CAP_AUDIT_WRITE: + return "audit_write" + case CAP_AUDIT_CONTROL: + return "audit_control" + case CAP_SETFCAP: + return "setfcap" + case CAP_MAC_OVERRIDE: + return "mac_override" + case CAP_MAC_ADMIN: + return "mac_admin" + case CAP_SYSLOG: + return "syslog" + case CAP_WAKE_ALARM: + return "wake_alarm" + case CAP_BLOCK_SUSPEND: + return "block_suspend" + } + return "unknown" +} + +const ( + // POSIX-draft defined capabilities. + + // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this + // overrides the restriction of changing file ownership and group + // ownership. + CAP_CHOWN Cap = 0 + + // Override all DAC access, including ACL execute access if + // [_POSIX_ACL] is defined. Excluding DAC access covered by + // CAP_LINUX_IMMUTABLE. + CAP_DAC_OVERRIDE Cap = 1 + + // Overrides all DAC restrictions regarding read and search on files + // and directories, including ACL restrictions if [_POSIX_ACL] is + // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. + CAP_DAC_READ_SEARCH Cap = 2 + + // Overrides all restrictions about allowed operations on files, where + // file owner ID must be equal to the user ID, except where CAP_FSETID + // is applicable. It doesn't override MAC and DAC restrictions. + CAP_FOWNER Cap = 3 + + // Overrides the following restrictions that the effective user ID + // shall match the file owner ID when setting the S_ISUID and S_ISGID + // bits on that file; that the effective group ID (or one of the + // supplementary group IDs) shall match the file owner ID when setting + // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are + // cleared on successful return from chown(2) (not implemented). 
+ CAP_FSETID Cap = 4 + + // Overrides the restriction that the real or effective user ID of a + // process sending a signal must match the real or effective user ID + // of the process receiving the signal. + CAP_KILL Cap = 5 + + // Allows setgid(2) manipulation + // Allows setgroups(2) + // Allows forged gids on socket credentials passing. + CAP_SETGID Cap = 6 + + // Allows set*uid(2) manipulation (including fsuid). + // Allows forged pids on socket credentials passing. + CAP_SETUID Cap = 7 + + // Linux-specific capabilities + + // Without VFS support for capabilities: + // Transfer any capability in your permitted set to any pid, + // remove any capability in your permitted set from any pid + // With VFS support for capabilities (neither of above, but) + // Add any capability from current's capability bounding set + // to the current process' inheritable set + // Allow taking bits out of capability bounding set + // Allow modification of the securebits for a process + CAP_SETPCAP Cap = 8 + + // Allow modification of S_IMMUTABLE and S_APPEND file attributes + CAP_LINUX_IMMUTABLE Cap = 9 + + // Allows binding to TCP/UDP sockets below 1024 + // Allows binding to ATM VCIs below 32 + CAP_NET_BIND_SERVICE Cap = 10 + + // Allow broadcasting, listen to multicast + CAP_NET_BROADCAST Cap = 11 + + // Allow interface configuration + // Allow administration of IP firewall, masquerading and accounting + // Allow setting debug option on sockets + // Allow modification of routing tables + // Allow setting arbitrary process / process group ownership on + // sockets + // Allow binding to any address for transparent proxying (also via NET_RAW) + // Allow setting TOS (type of service) + // Allow setting promiscuous mode + // Allow clearing driver statistics + // Allow multicasting + // Allow read/write of device-specific registers + // Allow activation of ATM control sockets + CAP_NET_ADMIN Cap = 12 + + // Allow use of RAW sockets + // Allow use of PACKET sockets + // Allow binding to any address for transparent proxying (also via NET_ADMIN) + CAP_NET_RAW Cap = 13 + + // Allow locking of shared memory segments + // Allow mlock and mlockall (which doesn't really have anything to do + // with IPC) + CAP_IPC_LOCK Cap = 14 + + // Override IPC ownership checks + CAP_IPC_OWNER Cap = 15 + + // Insert and remove kernel modules - modify kernel without limit + CAP_SYS_MODULE Cap = 16 + + // Allow ioperm/iopl access + // Allow sending USB messages to any device via /proc/bus/usb + CAP_SYS_RAWIO Cap = 17 + + // Allow use of chroot() + CAP_SYS_CHROOT Cap = 18 + + // Allow ptrace() of any process + CAP_SYS_PTRACE Cap = 19 + + // Allow configuration of process accounting + CAP_SYS_PACCT Cap = 20 + + // Allow configuration of the secure attention key + // Allow administration of the random device + // Allow examination and configuration of disk quotas + // Allow setting the domainname + // Allow setting the hostname + // Allow calling bdflush() + // Allow mount() and umount(), setting up new smb connection + // Allow some autofs root ioctls + // Allow nfsservctl + // Allow VM86_REQUEST_IRQ + // Allow to read/write pci config on alpha + // Allow irix_prctl on mips (setstacksize) + // Allow flushing all cache on m68k (sys_cacheflush) + // Allow removing semaphores + // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores + // and shared memory + // Allow locking/unlocking of shared memory segment + // Allow turning swap on/off + // Allow forged pids on socket credentials passing + // Allow setting readahead 
and flushing buffers on block devices + // Allow setting geometry in floppy driver + // Allow turning DMA on/off in xd driver + // Allow administration of md devices (mostly the above, but some + // extra ioctls) + // Allow tuning the ide driver + // Allow access to the nvram device + // Allow administration of apm_bios, serial and bttv (TV) device + // Allow manufacturer commands in isdn CAPI support driver + // Allow reading non-standardized portions of pci configuration space + // Allow DDI debug ioctl on sbpcd driver + // Allow setting up serial ports + // Allow sending raw qic-117 commands + // Allow enabling/disabling tagged queuing on SCSI controllers and sending + // arbitrary SCSI commands + // Allow setting encryption key on loopback filesystem + // Allow setting zone reclaim policy + CAP_SYS_ADMIN Cap = 21 + + // Allow use of reboot() + CAP_SYS_BOOT Cap = 22 + + // Allow raising priority and setting priority on other (different + // UID) processes + // Allow use of FIFO and round-robin (realtime) scheduling on own + // processes and setting the scheduling algorithm used by another + // process. + // Allow setting cpu affinity on other processes + CAP_SYS_NICE Cap = 23 + + // Override resource limits. Set resource limits. + // Override quota limits. + // Override reserved space on ext2 filesystem + // Modify data journaling mode on ext3 filesystem (uses journaling + // resources) + // NOTE: ext2 honors fsuid when checking for resource overrides, so + // you can override using fsuid too + // Override size restrictions on IPC message queues + // Allow more than 64hz interrupts from the real-time clock + // Override max number of consoles on console allocation + // Override max number of keymaps + CAP_SYS_RESOURCE Cap = 24 + + // Allow manipulation of system clock + // Allow irix_stime on mips + // Allow setting the real-time clock + CAP_SYS_TIME Cap = 25 + + // Allow configuration of tty devices + // Allow vhangup() of tty + CAP_SYS_TTY_CONFIG Cap = 26 + + // Allow the privileged aspects of mknod() + CAP_MKNOD Cap = 27 + + // Allow taking of leases on files + CAP_LEASE Cap = 28 + + CAP_AUDIT_WRITE Cap = 29 + CAP_AUDIT_CONTROL Cap = 30 + CAP_SETFCAP Cap = 31 + + // Override MAC access. + // The base kernel enforces no MAC policy. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based overrides of that policy, this is + // the capability it should use to do so. + CAP_MAC_OVERRIDE Cap = 32 + + // Allow MAC configuration or state changes. + // The base kernel requires no MAC configuration. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based checks on modifications to that + // policy or the data required to maintain it, this is the + // capability it should use to do so. 
+ CAP_MAC_ADMIN Cap = 33 + + // Allow configuring the kernel's syslog (printk behaviour) + CAP_SYSLOG Cap = 34 + + // Allow triggering something that will wake the system + CAP_WAKE_ALARM Cap = 35 + + // Allow preventing system suspends + CAP_BLOCK_SUSPEND Cap = 36 + + CAP_LAST_CAP = CAP_BLOCK_SUSPEND +) + +const capUpperMask = (uint32(1) << (uint(CAP_LAST_CAP) - 31)) - 1 diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/syscall_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/syscall_linux.go new file mode 100644 index 00000000000..c18e6f69186 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/vendor/src/github.com/syndtr/gocapability/capability/syscall_linux.go @@ -0,0 +1,143 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package capability + +import ( + "syscall" + "unsafe" +) + +type capHeader struct { + version uint32 + pid int +} + +type capData struct { + effective uint32 + permitted uint32 + inheritable uint32 +} + +func capget(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func capset(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { + _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) + if e1 != 0 { + err = e1 + } + return +} + +const ( + vfsXattrName = "security.capability" + + vfsCapVerMask = 0xff000000 + vfsCapVer1 = 0x01000000 + vfsCapVer2 = 0x02000000 + + vfsCapFlagMask = ^vfsCapVerMask + vfsCapFlageffective = 0x000001 + + vfscapDataSizeV1 = 4 * (1 + 2*1) + vfscapDataSizeV2 = 4 * (1 + 2*2) +) + +type vfscapData struct { + magic uint32 + data [2]struct { + permitted uint32 + inheritable uint32 + } + effective [2]uint32 + version int8 +} + +var ( + _vfsXattrName *byte +) + +func init() { + _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) +} + +func getVfsCap(path string, dest *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) + if e1 != 0 { + err = e1 + } + switch dest.magic & vfsCapVerMask { + case vfsCapVer1: + dest.version = 1 + if r0 != vfscapDataSizeV1 { + return syscall.EINVAL + } + dest.data[1].permitted = 0 + dest.data[1].inheritable = 0 + case vfsCapVer2: + dest.version = 2 + if r0 != vfscapDataSizeV2 { + return syscall.EINVAL + } + default: + return syscall.EINVAL + } + if dest.magic&vfsCapFlageffective != 0 { + dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable + dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable + } else { + dest.effective[0] = 0 + dest.effective[1] = 0 + } + return +} + +func setVfsCap(path string, data *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var size uintptr + if 
data.version == 1 { + data.magic = vfsCapVer1 + size = vfscapDataSizeV1 + } else if data.version == 2 { + data.magic = vfsCapVer2 + if data.effective[0] != 0 || data.effective[1] != 0 { + data.magic |= vfsCapFlageffective + data.data[0].permitted |= data.effective[0] + data.data[1].permitted |= data.effective[1] + } + size = vfscapDataSizeV2 + } else { + return syscall.EINVAL + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/errors.go b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/errors.go new file mode 100644 index 00000000000..8cd77418ccf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/errors.go @@ -0,0 +1,8 @@ +package xattr + +import ( + "fmt" + "runtime" +) + +var ErrNotSupportedPlatform = fmt.Errorf("platform and architecture is not supported %s %s", runtime.GOOS, runtime.GOARCH) diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_linux.go b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_linux.go new file mode 100644 index 00000000000..fc08d01fd9e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_linux.go @@ -0,0 +1,53 @@ +// +build linux + +package xattr + +import ( + "syscall" + + "github.com/docker/libcontainer/system" +) + +func XattrEnabled(path string) bool { + if Setxattr(path, "user.test", "") == syscall.ENOTSUP { + return false + } + return true +} + +func stringsfromByte(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} + +func Listxattr(path string) ([]string, error) { + size, err := system.Llistxattr(path, nil) + if err != nil { + return nil, err + } + buf := make([]byte, size) + read, err := system.Llistxattr(path, buf) + if err != nil { + return nil, err + } + names := stringsfromByte(buf[:read]) + return names, nil +} + +func Getxattr(path, attr string) (string, error) { + value, err := system.Lgetxattr(path, attr) + if err != nil { + return "", err + } + return string(value), nil +} + +func Setxattr(path, xattr, value string) error { + return system.Lsetxattr(path, xattr, []byte(value), 0) +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_test.go b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_test.go new file mode 100644 index 00000000000..d818c691d4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_test.go @@ -0,0 +1,77 @@ +// +build linux + +package xattr_test + +import ( + "os" + "testing" + + "github.com/docker/libcontainer/xattr" +) + +func testXattr(t *testing.T) { + tmp := "xattr_test" + out, err := os.OpenFile(tmp, os.O_WRONLY, 0) + if err != nil { + t.Fatal("failed") + } + attr := "user.test" + out.Close() + + if !xattr.XattrEnabled(tmp) { + t.Log("Disabled") + t.Fatal("failed") + } + t.Log("Success") + + err = xattr.Setxattr(tmp, attr, "test") + if err != nil { + t.Fatal("failed") + } + + var value string + value, err = xattr.Getxattr(tmp, attr) + if err != nil { + t.Fatal("failed") + } + if value != "test" { + t.Fatal("failed") + } + t.Log("Success") + + var names []string + names, err = xattr.Listxattr(tmp) + if err != nil { + t.Fatal("failed") + } + + var found int + for _, name := 
range names { + if name == attr { + found = 1 + } + } + // Listxattr doesn't return trusted.* and system.* namespace + // attrs when run in unprevileged mode. + if found != 1 { + t.Fatal("failed") + } + t.Log("Success") + + big := "0000000000000000000000000000000000000000000000000000000000000000000008c6419ad822dfe29283fb3ac98dcc5908810cb31f4cfe690040c42c144b7492eicompslf20dxmlpgz" + // Test for long xattrs larger than 128 bytes + err = xattr.Setxattr(tmp, attr, big) + if err != nil { + t.Fatal("failed to add long value") + } + value, err = xattr.Getxattr(tmp, attr) + if err != nil { + t.Fatal("failed to get long value") + } + t.Log("Success") + + if value != big { + t.Fatal("failed, value doesn't match") + } + t.Log("Success") +} diff --git a/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go new file mode 100644 index 00000000000..821dea3be11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package xattr + +func Listxattr(path string) ([]string, error) { + return nil, ErrNotSupportedPlatform +} + +func Getxattr(path, attr string) (string, error) { + return "", ErrNotSupportedPlatform +} + +func Setxattr(path, xattr, value string) error { + return ErrNotSupportedPlatform +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE new file mode 100644 index 00000000000..06b252bcbc5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke () +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown b/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown new file mode 100644 index 00000000000..3ab2116651a --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown @@ -0,0 +1,38 @@ +dbus +---- + +dbus is a simple library that implements native Go client bindings for the +D-Bus message bus system. 
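As a quick sketch of the basic flow (error handling kept minimal), a program might connect to the shared session bus and print the names currently registered on it; this only uses calls that appear elsewhere in this patch:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	var names []string
	// ListNames is a standard method on the bus daemon object.
	err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```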
+ +### Features + +* Complete native implementation of the D-Bus message protocol +* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections) +* Subpackages that help with the introspection / property interfaces + +### Installation + +This packages requires Go 1.1. If you installed it and set up your GOPATH, just run: + +``` +go get github.com/godbus/dbus +``` + +If you want to use the subpackages, you can install them the same way. + +### Usage + +The complete package documentation and some simple examples are available at +[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the +[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory +gives a short overview over the basic usage. + +Please note that the API is considered unstable for now and may change without +further notice. + +### License + +go.dbus is available under the Simplified BSD License; see LICENSE for the full +text. + +Nearly all of the credit for this library goes to github.com/guelfey/go.dbus. diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth.go new file mode 100644 index 00000000000..98017b693ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth.go @@ -0,0 +1,253 @@ +package dbus + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// AuthStatus represents the Status of an authentication mechanism. +type AuthStatus byte + +const ( + // AuthOk signals that authentication is finished; the next command + // from the server should be an OK. + AuthOk AuthStatus = iota + + // AuthContinue signals that additional data is needed; the next command + // from the server should be a DATA. + AuthContinue + + // AuthError signals an error; the server sent invalid data or some + // other unexpected thing happened and the current authentication + // process should be aborted. + AuthError +) + +type authState byte + +const ( + waitingForData authState = iota + waitingForOk + waitingForReject +) + +// Auth defines the behaviour of an authentication mechanism. +type Auth interface { + // Return the name of the mechnism, the argument to the first AUTH command + // and the next status. + FirstData() (name, resp []byte, status AuthStatus) + + // Process the given DATA command, and return the argument to the DATA + // command and the next status. If len(resp) == 0, no DATA command is sent. + HandleData(data []byte) (resp []byte, status AuthStatus) +} + +// Auth authenticates the connection, trying the given list of authentication +// mechanisms (in that order). If nil is passed, the EXTERNAL and +// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private +// connections, this method must be called before sending any messages to the +// bus. Auth must not be called on shared connections. 
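A rough sketch of the sequence this comment describes for a private connection (dial, authenticate with the default mechanisms, then Hello), mirroring what the shared-connection helpers in conn.go do; the helper name is illustrative and assumes the package is imported as dbus:

```go
func openPrivateSession() (*dbus.Conn, error) {
	conn, err := dbus.SessionBusPrivate()
	if err != nil {
		return nil, err
	}
	// nil selects the default mechanisms: EXTERNAL, then DBUS_COOKIE_SHA1.
	if err = conn.Auth(nil); err != nil {
		conn.Close()
		return nil, err
	}
	if err = conn.Hello(); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}
```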
+func (conn *Conn) Auth(methods []Auth) error { + if methods == nil { + uid := strconv.Itoa(os.Getuid()) + methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + } + in := bufio.NewReader(conn.transport) + err := conn.transport.SendNullByte() + if err != nil { + return err + } + err = authWriteLine(conn.transport, []byte("AUTH")) + if err != nil { + return err + } + s, err := authReadLine(in) + if err != nil { + return err + } + if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { + return errors.New("dbus: authentication protocol error") + } + s = s[1:] + for _, v := range s { + for _, m := range methods { + if name, data, status := m.FirstData(); bytes.Equal(v, name) { + var ok bool + err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) + if err != nil { + return err + } + switch status { + case AuthOk: + err, ok = conn.tryAuth(m, waitingForOk, in) + case AuthContinue: + err, ok = conn.tryAuth(m, waitingForData, in) + default: + panic("dbus: invalid authentication status") + } + if err != nil { + return err + } + if ok { + if conn.transport.SupportsUnixFDs() { + err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) + if err != nil { + return err + } + line, err := authReadLine(in) + if err != nil { + return err + } + switch { + case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): + conn.EnableUnixFDs() + conn.unixFD = true + case bytes.Equal(line[0], []byte("ERROR")): + default: + return errors.New("dbus: authentication protocol error") + } + } + err = authWriteLine(conn.transport, []byte("BEGIN")) + if err != nil { + return err + } + go conn.inWorker() + go conn.outWorker() + return nil + } + } + } + } + return errors.New("dbus: authentication failed") +} + +// tryAuth tries to authenticate with m as the mechanism, using state as the +// initial authState and in for reading input. It returns (nil, true) on +// success, (nil, false) on a REJECTED and (someErr, false) if some other +// error occured. 
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { + for { + s, err := authReadLine(in) + if err != nil { + return err, false + } + switch { + case state == waitingForData && string(s[0]) == "DATA": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + continue + } + data, status := m.HandleData(s[1]) + switch status { + case AuthOk, AuthContinue: + if len(data) != 0 { + err = authWriteLine(conn.transport, []byte("DATA"), data) + if err != nil { + return err, false + } + } + if status == AuthOk { + state = waitingForOk + } + case AuthError: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + } + case state == waitingForData && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForData && string(s[0]) == "ERROR": + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForData && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForData: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForOk && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForOk && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForOk && (string(s[0]) == "DATA" || + string(s[0]) == "ERROR"): + + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForOk: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForReject && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForReject: + return errors.New("dbus: authentication protocol error"), false + default: + panic("dbus: invalid auth state") + } + } +} + +// authReadLine reads a line and separates it into its fields. +func authReadLine(in *bufio.Reader) ([][]byte, error) { + data, err := in.ReadBytes('\n') + if err != nil { + return nil, err + } + data = bytes.TrimSuffix(data, []byte("\r\n")) + return bytes.Split(data, []byte{' '}), nil +} + +// authWriteLine writes the given line in the authentication protocol format +// (elements of data separated by a " " and terminated by "\r\n"). +func authWriteLine(out io.Writer, data ...[]byte) error { + buf := make([]byte, 0) + for i, v := range data { + buf = append(buf, v...) 
+ if i != len(data)-1 { + buf = append(buf, ' ') + } + } + buf = append(buf, '\r') + buf = append(buf, '\n') + n, err := out.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go new file mode 100644 index 00000000000..7e376d3ef6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "encoding/hex" +) + +// AuthExternal returns an Auth that authenticates as the given user with the +// EXTERNAL mechanism. +func AuthExternal(user string) Auth { + return authExternal{user} +} + +// AuthExternal implements the EXTERNAL authentication mechanism. +type authExternal struct { + user string +} + +func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("EXTERNAL"), b, AuthOk +} + +func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { + return nil, AuthError +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go new file mode 100644 index 00000000000..df15b461198 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go @@ -0,0 +1,102 @@ +package dbus + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "os" +) + +// AuthCookieSha1 returns an Auth that authenticates as the given user with the +// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home +// directory of the user. +func AuthCookieSha1(user, home string) Auth { + return authCookieSha1{user, home} +} + +type authCookieSha1 struct { + user, home string +} + +func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue +} + +func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { + challenge := make([]byte, len(data)/2) + _, err := hex.Decode(challenge, data) + if err != nil { + return nil, AuthError + } + b := bytes.Split(challenge, []byte{' '}) + if len(b) != 3 { + return nil, AuthError + } + context := b[0] + id := b[1] + svchallenge := b[2] + cookie := a.getCookie(context, id) + if cookie == nil { + return nil, AuthError + } + clchallenge := a.generateChallenge() + if clchallenge == nil { + return nil, AuthError + } + hash := sha1.New() + hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) + hexhash := make([]byte, 2*hash.Size()) + hex.Encode(hexhash, hash.Sum(nil)) + data = append(clchallenge, ' ') + data = append(data, hexhash...) + resp := make([]byte, 2*len(data)) + hex.Encode(resp, data) + return resp, AuthOk +} + +// getCookie searches for the cookie identified by id in context and returns +// the cookie content or nil. (Since HandleData can't return a specific error, +// but only whether an error occured, this function also doesn't bother to +// return an error.) 
+func (a authCookieSha1) getCookie(context, id []byte) []byte { + file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) + if err != nil { + return nil + } + defer file.Close() + rd := bufio.NewReader(file) + for { + line, err := rd.ReadBytes('\n') + if err != nil { + return nil + } + line = line[:len(line)-1] + b := bytes.Split(line, []byte{' '}) + if len(b) != 3 { + return nil + } + if bytes.Equal(b[0], id) { + return b[2] + } + } +} + +// generateChallenge returns a random, hex-encoded challenge, or nil on error +// (see above). +func (a authCookieSha1) generateChallenge() []byte { + b := make([]byte, 16) + n, err := rand.Read(b) + if err != nil { + return nil + } + if n != 16 { + return nil + } + enc := make([]byte, 32) + hex.Encode(enc, b) + return enc +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/call.go b/Godeps/_workspace/src/github.com/godbus/dbus/call.go new file mode 100644 index 00000000000..1d2fbc7efd8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/call.go @@ -0,0 +1,147 @@ +package dbus + +import ( + "errors" + "strings" +) + +// Call represents a pending or completed method call. +type Call struct { + Destination string + Path ObjectPath + Method string + Args []interface{} + + // Strobes when the call is complete. + Done chan *Call + + // After completion, the error status. If this is non-nil, it may be an + // error message from the peer (with Error as its type) or some other error. + Err error + + // Holds the response once the call is done. + Body []interface{} +} + +var errSignature = errors.New("dbus: mismatched signature") + +// Store stores the body of the reply into the provided pointers. It returns +// an error if the signatures of the body and retvalues don't match, or if +// the error status is not nil. +func (c *Call) Store(retvalues ...interface{}) error { + if c.Err != nil { + return c.Err + } + + return Store(c.Body, retvalues...) +} + +// Object represents a remote object on which methods can be invoked. +type Object struct { + conn *Conn + dest string + path ObjectPath +} + +// Call calls a method with (*Object).Go and waits for its reply. +func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { + return <-o.Go(method, flags, make(chan *Call, 1), args...).Done +} + +// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given +// object. The property name must be given in interface.member notation. +func (o *Object) GetProperty(p string) (Variant, error) { + idx := strings.LastIndex(p, ".") + if idx == -1 || idx+1 == len(p) { + return Variant{}, errors.New("dbus: invalid property " + p) + } + + iface := p[:idx] + prop := p[idx+1:] + + result := Variant{} + err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) + + if err != nil { + return Variant{}, err + } + + return result, nil +} + +// Go calls a method with the given arguments asynchronously. It returns a +// Call structure representing this method call. The passed channel will +// return the same value once the call is done. If ch is nil, a new channel +// will be allocated. Otherwise, ch has to be buffered or Go will panic. +// +// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure +// is returned of which only the Err member is valid. +// +// If the method parameter contains a dot ('.'), the part before the last dot +// specifies the interface on which the method is called. 
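A small sketch of the asynchronous pattern described above, assuming an authenticated *Conn named conn and the package imported as dbus (the buffered channel is required; the called method is just an example):

```go
obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
ch := make(chan *dbus.Call, 1) // must be buffered, or pass nil
obj.Go("org.freedesktop.DBus.ListNames", 0, ch)

// ... do other work while the call is in flight ...

call := <-ch // the completed Call is delivered on its Done channel
if call.Err != nil {
	// handle the D-Bus error
}
var names []string
if err := call.Store(&names); err != nil {
	// handle a signature mismatch
}
```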
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + iface := "" + i := strings.LastIndex(method, ".") + if i != -1 { + iface = method[:i] + } + method = method[i+1:] + msg := new(Message) + msg.Type = TypeMethodCall + msg.serial = o.conn.getSerial() + msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldPath] = MakeVariant(o.path) + msg.Headers[FieldDestination] = MakeVariant(o.dest) + msg.Headers[FieldMember] = MakeVariant(method) + if iface != "" { + msg.Headers[FieldInterface] = MakeVariant(iface) + } + msg.Body = args + if len(args) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) + } + if msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 10) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Object).Go") + } + call := &Call{ + Destination: o.dest, + Path: o.path, + Method: method, + Args: args, + Done: ch, + } + o.conn.callsLck.Lock() + o.conn.calls[msg.serial] = call + o.conn.callsLck.Unlock() + o.conn.outLck.RLock() + if o.conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + o.conn.out <- msg + } + o.conn.outLck.RUnlock() + return call + } + o.conn.outLck.RLock() + defer o.conn.outLck.RUnlock() + if o.conn.closed { + return &Call{Err: ErrClosed} + } + o.conn.out <- msg + return &Call{Err: nil} +} + +// Destination returns the destination that calls on o are sent to. +func (o *Object) Destination() string { + return o.dest +} + +// Path returns the path that calls on o are sent to. +func (o *Object) Path() ObjectPath { + return o.path +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn.go new file mode 100644 index 00000000000..d6f9c2162e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn.go @@ -0,0 +1,602 @@ +package dbus + +import ( + "errors" + "io" + "os" + "reflect" + "strings" + "sync" +) + +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +var ( + systemBus *Conn + systemBusLck sync.Mutex + sessionBus *Conn + sessionBusLck sync.Mutex +) + +// ErrClosed is the error returned by calls on a closed connection. +var ErrClosed = errors.New("dbus: connection closed by user") + +// Conn represents a connection to a message bus (usually, the system or +// session bus). +// +// Connections are either shared or private. Shared connections +// are shared between calls to the functions that return them. As a result, +// the methods Close, Auth and Hello must not be called on them. +// +// Multiple goroutines may invoke methods on a connection simultaneously. +type Conn struct { + transport + + busObj *Object + unixFD bool + uuid string + + names []string + namesLck sync.RWMutex + + serialLck sync.Mutex + nextSerial uint32 + serialUsed map[uint32]bool + + calls map[uint32]*Call + callsLck sync.RWMutex + + handlers map[ObjectPath]map[string]interface{} + handlersLck sync.RWMutex + + out chan *Message + closed bool + outLck sync.RWMutex + + signals []chan<- *Signal + signalsLck sync.Mutex + + eavesdropped chan<- *Message + eavesdroppedLck sync.Mutex +} + +// SessionBus returns a shared connection to the session bus, connecting to it +// if not already done. 
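A sketch of typical use of the shared connection, combined with signal delivery (assumes fmt is imported and error handling is elided; AddMatch is the standard bus-daemon method for requesting that matching signals be routed to this connection):

```go
conn, err := dbus.SessionBus()
if err != nil {
	panic(err)
}
// Ask the bus daemon to route matching signals here, then register a
// buffered channel to receive them.
conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
	"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
ch := make(chan *dbus.Signal, 10)
conn.Signal(ch)
for sig := range ch {
	fmt.Println(sig.Name, sig.Body)
}
```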
+func SessionBus() (conn *Conn, err error) { + sessionBusLck.Lock() + defer sessionBusLck.Unlock() + if sessionBus != nil { + return sessionBus, nil + } + defer func() { + if conn != nil { + sessionBus = conn + } + }() + conn, err = SessionBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") + if address != "" && address != "autolaunch:" { + return Dial(address) + } + + return sessionBusPlatform() +} + +// SystemBus returns a shared connection to the system bus, connecting to it if +// not already done. +func SystemBus() (conn *Conn, err error) { + systemBusLck.Lock() + defer systemBusLck.Unlock() + if systemBus != nil { + return systemBus, nil + } + defer func() { + if conn != nil { + systemBus = conn + } + }() + conn, err = SystemBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SystemBusPrivate returns a new private connection to the system bus. +func SystemBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return Dial(address) + } + return Dial(defaultSystemBusAddress) +} + +// Dial establishes a new private connection to the message bus specified by address. +func Dial(address string) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr) +} + +// NewConn creates a new private *Conn from an already established connection. +func NewConn(conn io.ReadWriteCloser) (*Conn, error) { + return newConn(genericTransport{conn}) +} + +// newConn creates a new *Conn from a transport. +func newConn(tr transport) (*Conn, error) { + conn := new(Conn) + conn.transport = tr + conn.calls = make(map[uint32]*Call) + conn.out = make(chan *Message, 10) + conn.handlers = make(map[ObjectPath]map[string]interface{}) + conn.nextSerial = 1 + conn.serialUsed = map[uint32]bool{0: true} + conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + return conn, nil +} + +// BusObject returns the object owned by the bus daemon which handles +// administrative requests. +func (conn *Conn) BusObject() *Object { + return conn.busObj +} + +// Close closes the connection. Any blocked operations will return with errors +// and the channels passed to Eavesdrop and Signal are closed. This method must +// not be called on shared connections. +func (conn *Conn) Close() error { + conn.outLck.Lock() + close(conn.out) + conn.closed = true + conn.outLck.Unlock() + conn.signalsLck.Lock() + for _, ch := range conn.signals { + close(ch) + } + conn.signalsLck.Unlock() + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + close(conn.eavesdropped) + } + conn.eavesdroppedLck.Unlock() + return conn.transport.Close() +} + +// Eavesdrop causes conn to send all incoming messages to the given channel +// without further processing. Method replies, errors and signals will not be +// sent to the appropiate channels and method calls will not be handled. If nil +// is passed, the normal behaviour is restored. 
+// +// The caller has to make sure that ch is sufficiently buffered; +// if a message arrives when a write to ch is not possible, the message is +// discarded. +func (conn *Conn) Eavesdrop(ch chan<- *Message) { + conn.eavesdroppedLck.Lock() + conn.eavesdropped = ch + conn.eavesdroppedLck.Unlock() +} + +// getSerial returns an unused serial. +func (conn *Conn) getSerial() uint32 { + conn.serialLck.Lock() + defer conn.serialLck.Unlock() + n := conn.nextSerial + for conn.serialUsed[n] { + n++ + } + conn.serialUsed[n] = true + conn.nextSerial = n + 1 + return n +} + +// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be +// called after authentication, but before sending any other messages to the +// bus. Hello must not be called for shared connections. +func (conn *Conn) Hello() error { + var s string + err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s) + if err != nil { + return err + } + conn.namesLck.Lock() + conn.names = make([]string, 1) + conn.names[0] = s + conn.namesLck.Unlock() + return nil +} + +// inWorker runs in an own goroutine, reading incoming messages from the +// transport and dispatching them appropiately. +func (conn *Conn) inWorker() { + for { + msg, err := conn.ReadMessage() + if err == nil { + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + select { + case conn.eavesdropped <- msg: + default: + } + conn.eavesdroppedLck.Unlock() + continue + } + conn.eavesdroppedLck.Unlock() + dest, _ := msg.Headers[FieldDestination].value.(string) + found := false + if dest == "" { + found = true + } else { + conn.namesLck.RLock() + if len(conn.names) == 0 { + found = true + } + for _, v := range conn.names { + if dest == v { + found = true + break + } + } + conn.namesLck.RUnlock() + } + if !found { + // Eavesdropped a message, but no channel for it is registered. + // Ignore it. + continue + } + switch msg.Type { + case TypeMethodReply, TypeError: + serial := msg.Headers[FieldReplySerial].value.(uint32) + conn.callsLck.Lock() + if c, ok := conn.calls[serial]; ok { + if msg.Type == TypeError { + name, _ := msg.Headers[FieldErrorName].value.(string) + c.Err = Error{name, msg.Body} + } else { + c.Body = msg.Body + } + c.Done <- c + conn.serialLck.Lock() + delete(conn.serialUsed, serial) + conn.serialLck.Unlock() + delete(conn.calls, serial) + } + conn.callsLck.Unlock() + case TypeSignal: + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. + sender, _ := msg.Headers[FieldSender].value.(string) + if iface == "org.freedesktop.DBus" && member == "NameLost" && + sender == "org.freedesktop.DBus" { + + name, _ := msg.Body[0].(string) + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." 
+ member, + Body: msg.Body, + } + conn.signalsLck.Lock() + for _, ch := range conn.signals { + // don't block trying to send a signal + select { + case ch <- signal: + default: + } + } + conn.signalsLck.Unlock() + case TypeMethodCall: + go conn.handleCall(msg) + } + } else if _, ok := err.(InvalidMessageError); !ok { + // Some read error occured (usually EOF); we can't really do + // anything but to shut down all stuff and returns errors to all + // pending replies. + conn.Close() + conn.callsLck.RLock() + for _, v := range conn.calls { + v.Err = err + v.Done <- v + } + conn.callsLck.RUnlock() + return + } + // invalid messages are ignored + } +} + +// Names returns the list of all names that are currently owned by this +// connection. The slice is always at least one element long, the first element +// being the unique name of the connection. +func (conn *Conn) Names() []string { + conn.namesLck.RLock() + // copy the slice so it can't be modified + s := make([]string, len(conn.names)) + copy(s, conn.names) + conn.namesLck.RUnlock() + return s +} + +// Object returns the object identified by the given destination name and path. +func (conn *Conn) Object(dest string, path ObjectPath) *Object { + return &Object{conn, dest, path} +} + +// outWorker runs in an own goroutine, encoding and sending messages that are +// sent to conn.out. +func (conn *Conn) outWorker() { + for msg := range conn.out { + err := conn.SendMessage(msg) + conn.callsLck.RLock() + if err != nil { + if c := conn.calls[msg.serial]; c != nil { + c.Err = err + c.Done <- c + } + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } else if msg.Type != TypeMethodCall { + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } + conn.callsLck.RUnlock() + } +} + +// Send sends the given message to the message bus. You usually don't need to +// use this; use the higher-level equivalents (Call / Go, Emit and Export) +// instead. If msg is a method call and NoReplyExpected is not set, a non-nil +// call is returned and the same value is sent to ch (which must be buffered) +// once the call is complete. Otherwise, ch is ignored and a Call structure is +// returned of which only the Err member is valid. +func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { + var call *Call + + msg.serial = conn.getSerial() + if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 5) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } + call = new(Call) + call.Destination, _ = msg.Headers[FieldDestination].value.(string) + call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) + iface, _ := msg.Headers[FieldInterface].value.(string) + member, _ := msg.Headers[FieldMember].value.(string) + call.Method = iface + "." + member + call.Args = msg.Body + call.Done = ch + conn.callsLck.Lock() + conn.calls[msg.serial] = call + conn.callsLck.Unlock() + conn.outLck.RLock() + if conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + conn.out <- msg + } + conn.outLck.RUnlock() + } else { + conn.outLck.RLock() + if conn.closed { + call = &Call{Err: ErrClosed} + } else { + conn.out <- msg + call = &Call{Err: nil} + } + conn.outLck.RUnlock() + } + return call +} + +// sendError creates an error message corresponding to the parameters and sends +// it to conn.out. 
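+// Editor's illustrative sketch (not part of the upstream godbus source): using
+// Send directly with a buffered reply channel, as the Send documentation above
+// requires. The helper name pingViaSend is hypothetical.
+func pingViaSend(conn *Conn, dest string) error {
+	ch := make(chan *Call, 1) // must be buffered
+	conn.Send(&Message{
+		Type: TypeMethodCall,
+		Headers: map[HeaderField]Variant{
+			FieldDestination: MakeVariant(dest),
+			FieldPath:        MakeVariant(ObjectPath("/")),
+			FieldInterface:   MakeVariant("org.freedesktop.DBus.Peer"),
+			FieldMember:      MakeVariant("Ping"),
+		},
+	}, ch)
+	call := <-ch // the completed Call is sent to ch once the reply arrives
+	return call.Err
+}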
+func (conn *Conn) sendError(e Error, dest string, serial uint32) { + msg := new(Message) + msg.Type = TypeError + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldErrorName] = MakeVariant(e.Name) + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = e.Body + if len(e.Body) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// sendReply creates a method reply message corresponding to the parameters and +// sends it to conn.out. +func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { + msg := new(Message) + msg.Type = TypeMethodReply + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// Signal registers the given channel to be passed all received signal messages. +// The caller has to make sure that ch is sufficiently buffered; if a message +// arrives when a write to c is not possible, it is discarded. +// +// Multiple of these channels can be registered at the same time. Passing a +// channel that already is registered will remove it from the list of the +// registered channels. +// +// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a +// channel for eavesdropped messages, this channel receives all signals, and +// none of the channels passed to Signal will receive any signals. +func (conn *Conn) Signal(ch chan<- *Signal) { + conn.signalsLck.Lock() + conn.signals = append(conn.signals, ch) + conn.signalsLck.Unlock() +} + +// SupportsUnixFDs returns whether the underlying transport supports passing of +// unix file descriptors. If this is false, method calls containing unix file +// descriptors will return an error and emitted signals containing them will +// not be sent. +func (conn *Conn) SupportsUnixFDs() bool { + return conn.unixFD +} + +// Error represents a D-Bus message of type Error. +type Error struct { + Name string + Body []interface{} +} + +func (e Error) Error() string { + if len(e.Body) >= 1 { + s, ok := e.Body[0].(string) + if ok { + return s + } + } + return e.Name +} + +// Signal represents a D-Bus message of type Signal. The name member is given in +// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost. +type Signal struct { + Sender string + Path ObjectPath + Name string + Body []interface{} +} + +// transport is a D-Bus transport. +type transport interface { + // Read and Write raw data (for example, for the authentication protocol). + io.ReadWriteCloser + + // Send the initial null byte used for the EXTERNAL mechanism. + SendNullByte() error + + // Returns whether this transport supports passing Unix FDs. + SupportsUnixFDs() bool + + // Signal the transport that Unix FD passing is enabled for this connection. + EnableUnixFDs() + + // Read / send a message, handling things like Unix FDs. 
+ ReadMessage() (*Message, error) + SendMessage(*Message) error +} + +var ( + transports map[string]func(string) (transport, error) = make(map[string]func(string) (transport, error)) +) + +func getTransport(address string) (transport, error) { + var err error + var t transport + + addresses := strings.Split(address, ";") + for _, v := range addresses { + i := strings.IndexRune(v, ':') + if i == -1 { + err = errors.New("dbus: invalid bus address (no transport)") + continue + } + f := transports[v[:i]] + if f == nil { + err = errors.New("dbus: invalid bus address (invalid or unsupported transport)") + } + t, err = f(v[i+1:]) + if err == nil { + return t, nil + } + } + return nil, err +} + +// dereferenceAll returns a slice that, assuming that vs is a slice of pointers +// of arbitrary types, containes the values that are obtained from dereferencing +// all elements in vs. +func dereferenceAll(vs []interface{}) []interface{} { + for i := range vs { + v := reflect.ValueOf(vs[i]) + v = v.Elem() + vs[i] = v.Interface() + } + return vs +} + +// getKey gets a key from a the list of keys. Returns "" on error / not found... +func getKey(s, key string) string { + i := strings.Index(s, key) + if i == -1 { + return "" + } + if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { + return "" + } + j := strings.Index(s, ",") + if j == -1 { + j = len(s) + } + return s[i+len(key)+1 : j] +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go new file mode 100644 index 00000000000..b67bb1b81da --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go @@ -0,0 +1,21 @@ +package dbus + +import ( + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial("unix:path=" + string(b[:len(b)-1])) +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go new file mode 100644 index 00000000000..f74b8758d44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go @@ -0,0 +1,27 @@ +// +build !darwin + +package dbus + +import ( + "bytes" + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("dbus-launch") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + i := bytes.IndexByte(b, '=') + j := bytes.IndexByte(b, '\n') + + if i == -1 || j == -1 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial(string(b[i+1 : j])) +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn_test.go new file mode 100644 index 00000000000..a2b14e8cc46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn_test.go @@ -0,0 +1,199 @@ +package dbus + +import "testing" + +func TestSessionBus(t *testing.T) { + _, err := SessionBus() + if err != nil { + t.Error(err) + } +} + +func TestSystemBus(t *testing.T) { + _, err := SystemBus() + if err != nil { + t.Error(err) + } +} + +func TestSend(t *testing.T) { + bus, err := SessionBus() + if err != nil { + t.Error(err) + } + ch := make(chan *Call, 1) + msg := &Message{ + Type: TypeMethodCall, + Flags: 0, + Headers: map[HeaderField]Variant{ 
+ FieldDestination: MakeVariant(bus.Names()[0]), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: MakeVariant("org.freedesktop.DBus.Peer"), + FieldMember: MakeVariant("Ping"), + }, + } + call := bus.Send(msg, ch) + <-ch + if call.Err != nil { + t.Error(call.Err) + } +} + +type server struct{} + +func (server) Double(i int64) (int64, *Error) { + return 2 * i, nil +} + +func BenchmarkCall(b *testing.B) { + b.StopTimer() + var s string + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + b.StartTimer() + for i := 0; i < b.N; i++ { + err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&s) + if err != nil { + b.Fatal(err) + } + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } +} + +func BenchmarkCallAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Error(v.Err) + } + s := v.Body[0].(string) + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name) + } + <-done +} + +func BenchmarkServe(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServe(b, srv, cli) +} + +func BenchmarkServeAsync(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServeAsync(b, srv, cli) +} + +func BenchmarkServeSameConn(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServe(b, bus, bus) +} + +func BenchmarkServeSameConnAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServeAsync(b, bus, bus) +} + +func benchmarkServe(b *testing.B, srv, cli *Conn) { + var r int64 + var err error + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + b.StartTimer() + for i := 0; i < b.N; i++ { + err = obj.Call("org.guelfey.DBus.Test.Double", 0, int64(i)).Store(&r) + if err != nil { + b.Fatal(err) + } + if r != 2*int64(i) { + b.Errorf("got %d, wanted %d", r, 2*int64(i)) + } + } +} + +func benchmarkServeAsync(b *testing.B, srv, cli *Conn) { + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Fatal(v.Err) + } + i, r := v.Args[0].(int64), v.Body[0].(int64) + if 2*i != r { + b.Errorf("got %d, wanted %d", r, 2*i) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.guelfey.DBus.Test.Double", 0, c, int64(i)) + } + <-done +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go 
b/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go new file mode 100644 index 00000000000..2ce68735cdf --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go @@ -0,0 +1,258 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" +) + +var ( + byteType = reflect.TypeOf(byte(0)) + boolType = reflect.TypeOf(false) + uint8Type = reflect.TypeOf(uint8(0)) + int16Type = reflect.TypeOf(int16(0)) + uint16Type = reflect.TypeOf(uint16(0)) + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + signatureType = reflect.TypeOf(Signature{""}) + objectPathType = reflect.TypeOf(ObjectPath("")) + variantType = reflect.TypeOf(Variant{Signature{""}, nil}) + interfacesType = reflect.TypeOf([]interface{}{}) + unixFDType = reflect.TypeOf(UnixFD(0)) + unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) +) + +// An InvalidTypeError signals that a value which cannot be represented in the +// D-Bus wire format was passed to a function. +type InvalidTypeError struct { + Type reflect.Type +} + +func (e InvalidTypeError) Error() string { + return "dbus: invalid type " + e.Type.String() +} + +// Store copies the values contained in src to dest, which must be a slice of +// pointers. It converts slices of interfaces from src to corresponding structs +// in dest. An error is returned if the lengths of src and dest or the types of +// their elements don't match. +func Store(src []interface{}, dest ...interface{}) error { + if len(src) != len(dest) { + return errors.New("dbus.Store: length mismatch") + } + + for i := range src { + if err := store(src[i], dest[i]); err != nil { + return err + } + } + return nil +} + +func store(src, dest interface{}) error { + if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { + reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) + return nil + } else if hasStruct(dest) { + rv := reflect.ValueOf(dest).Elem() + switch rv.Kind() { + case reflect.Struct: + vs, ok := src.([]interface{}) + if !ok { + return errors.New("dbus.Store: type mismatch") + } + t := rv.Type() + ndest := make([]interface{}, 0, rv.NumField()) + for i := 0; i < rv.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + ndest = append(ndest, rv.Field(i).Addr().Interface()) + } + } + if len(vs) != len(ndest) { + return errors.New("dbus.Store: type mismatch") + } + err := Store(vs, ndest...) 
+ if err != nil { + return errors.New("dbus.Store: type mismatch") + } + case reflect.Slice: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Slice { + return errors.New("dbus.Store: type mismatch") + } + rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) + for i := 0; i < sv.Len(); i++ { + if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { + return err + } + } + case reflect.Map: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Map { + return errors.New("dbus.Store: type mismatch") + } + keys := sv.MapKeys() + rv.Set(reflect.MakeMap(sv.Type())) + for _, key := range keys { + v := reflect.New(sv.Type().Elem()) + if err := store(v, sv.MapIndex(key).Interface()); err != nil { + return err + } + rv.SetMapIndex(key, v.Elem()) + } + default: + return errors.New("dbus.Store: type mismatch") + } + return nil + } else { + return errors.New("dbus.Store: type mismatch") + } +} + +func hasStruct(v interface{}) bool { + t := reflect.TypeOf(v) + for { + switch t.Kind() { + case reflect.Struct: + return true + case reflect.Slice, reflect.Ptr, reflect.Map: + t = t.Elem() + default: + return false + } + } +} + +// An ObjectPath is an object path as defined by the D-Bus spec. +type ObjectPath string + +// IsValid returns whether the object path is valid. +func (o ObjectPath) IsValid() bool { + s := string(o) + if len(s) == 0 { + return false + } + if s[0] != '/' { + return false + } + if s[len(s)-1] == '/' && len(s) != 1 { + return false + } + // probably not used, but technically possible + if s == "/" { + return true + } + split := strings.Split(s[1:], "/") + for _, v := range split { + if len(v) == 0 { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// A UnixFD is a Unix file descriptor sent over the wire. See the package-level +// documentation for more information about Unix file descriptor passsing. +type UnixFD int32 + +// A UnixFDIndex is the representation of a Unix file descriptor in a message. +type UnixFDIndex uint32 + +// alignment returns the alignment of values of type t. +func alignment(t reflect.Type) int { + switch t { + case variantType: + return 1 + case objectPathType: + return 4 + case signatureType: + return 1 + case interfacesType: // sometimes used for structs + return 8 + } + switch t.Kind() { + case reflect.Uint8: + return 1 + case reflect.Uint16, reflect.Int16: + return 2 + case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + return 4 + case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: + return 8 + case reflect.Ptr: + return alignment(t.Elem()) + } + return 1 +} + +// isKeyType returns whether t is a valid type for a D-Bus dict. +func isKeyType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, + reflect.String: + + return true + } + return false +} + +// isValidInterface returns whether s is a valid name for an interface. +func isValidInterface(s string) bool { + if len(s) == 0 || len(s) > 255 || s[0] == '.' { + return false + } + elem := strings.Split(s, ".") + if len(elem) < 2 { + return false + } + for _, v := range elem { + if len(v) == 0 { + return false + } + if v[0] >= '0' && v[0] <= '9' { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// isValidMember returns whether s is a valid name for a member. 
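+// Editor's illustrative sketch (not part of the upstream godbus source): Store
+// converts a STRUCT that was decoded as []interface{} into a Go struct with
+// matching exported fields. The type and function names are hypothetical.
+type storedVersion struct {
+	Major uint32
+	Minor uint32
+}
+
+func storeExample() (string, storedVersion, error) {
+	body := []interface{}{"example", []interface{}{uint32(1), uint32(2)}}
+	var name string
+	var ver storedVersion
+	err := Store(body, &name, &ver)
+	return name, ver, err
+}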
+func isValidMember(s string) bool { + if len(s) == 0 || len(s) > 255 { + return false + } + i := strings.Index(s, ".") + if i != -1 { + return false + } + if s[0] >= '0' && s[0] <= '9' { + return false + } + for _, c := range s { + if !isMemberChar(c) { + return false + } + } + return true +} + +func isMemberChar(c rune) bool { + return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || c == '_' +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go b/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go new file mode 100644 index 00000000000..ef50dcab98d --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go @@ -0,0 +1,228 @@ +package dbus + +import ( + "encoding/binary" + "io" + "reflect" +) + +type decoder struct { + in io.Reader + order binary.ByteOrder + pos int +} + +// newDecoder returns a new decoder that reads values from in. The input is +// expected to be in the given byte order. +func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { + dec := new(decoder) + dec.in = in + dec.order = order + return dec +} + +// align aligns the input to the given boundary and panics on error. +func (dec *decoder) align(n int) { + if dec.pos%n != 0 { + newpos := (dec.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-dec.pos) + if _, err := io.ReadFull(dec.in, empty); err != nil { + panic(err) + } + dec.pos = newpos + } +} + +// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. +func (dec *decoder) binread(v interface{}) { + if err := binary.Read(dec.in, dec.order, v); err != nil { + panic(err) + } +} + +func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { + defer func() { + var ok bool + v := recover() + if err, ok = v.(error); ok { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = FormatError("unexpected EOF") + } + } + }() + vs = make([]interface{}, 0) + s := sig.str + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + return nil, err + } + v := dec.decode(s[:len(s)-len(rem)], 0) + vs = append(vs, v) + s = rem + } + return vs, nil +} + +func (dec *decoder) decode(s string, depth int) interface{} { + dec.align(alignment(typeFor(s))) + switch s[0] { + case 'y': + var b [1]byte + if _, err := dec.in.Read(b[:]); err != nil { + panic(err) + } + dec.pos++ + return b[0] + case 'b': + i := dec.decode("u", depth).(uint32) + switch { + case i == 0: + return false + case i == 1: + return true + default: + panic(FormatError("invalid value for boolean")) + } + case 'n': + var i int16 + dec.binread(&i) + dec.pos += 2 + return i + case 'i': + var i int32 + dec.binread(&i) + dec.pos += 4 + return i + case 'x': + var i int64 + dec.binread(&i) + dec.pos += 8 + return i + case 'q': + var i uint16 + dec.binread(&i) + dec.pos += 2 + return i + case 'u': + var i uint32 + dec.binread(&i) + dec.pos += 4 + return i + case 't': + var i uint64 + dec.binread(&i) + dec.pos += 8 + return i + case 'd': + var f float64 + dec.binread(&f) + dec.pos += 8 + return f + case 's': + length := dec.decode("u", depth).(uint32) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + return string(b[:len(b)-1]) + case 'o': + return ObjectPath(dec.decode("s", depth).(string)) + case 'g': + length := dec.decode("y", depth).(byte) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + sig, err := ParseSignature(string(b[:len(b)-1])) + if 
err != nil { + panic(err) + } + return sig + case 'v': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var variant Variant + sig := dec.decode("g", depth).(Signature) + if len(sig.str) == 0 { + panic(FormatError("variant signature is empty")) + } + err, rem := validSingle(sig.str, 0) + if err != nil { + panic(err) + } + if rem != "" { + panic(FormatError("variant signature has multiple types")) + } + variant.sig = sig + variant.value = dec.decode(sig.str, depth+1) + return variant + case 'h': + return UnixFDIndex(dec.decode("u", depth).(uint32)) + case 'a': + if len(s) > 1 && s[1] == '{' { + ksig := s[2:3] + vsig := s[3 : len(s)-1] + v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) + if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + // Even for empty maps, the correct padding must be included + dec.align(8) + spos := dec.pos + for dec.pos < spos+int(length) { + dec.align(8) + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + kv := dec.decode(ksig, depth+2) + vv := dec.decode(vsig, depth+2) + v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return v.Interface() + } + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) + // Even for empty arrays, the correct padding must be included + dec.align(alignment(typeFor(s[1:]))) + spos := dec.pos + for dec.pos < spos+int(length) { + ev := dec.decode(s[1:], depth+1) + v = reflect.Append(v, reflect.ValueOf(ev)) + } + return v.Interface() + case '(': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + dec.align(8) + v := make([]interface{}, 0) + s = s[1 : len(s)-1] + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + panic(err) + } + ev := dec.decode(s[:len(s)-len(rem)], depth+1) + v = append(v, ev) + s = rem + } + return v + default: + panic(SignatureError{Sig: s}) + } +} + +// A FormatError is an error in the wire format. +type FormatError string + +func (e FormatError) Error() string { + return "dbus: wire format error: " + string(e) +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/doc.go b/Godeps/_workspace/src/github.com/godbus/dbus/doc.go new file mode 100644 index 00000000000..deff554a381 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/doc.go @@ -0,0 +1,63 @@ +/* +Package dbus implements bindings to the D-Bus message bus system. + +To use the message bus API, you first need to connect to a bus (usually the +session or system bus). The acquired connection then can be used to call methods +on remote objects and emit or receive signals. Using the Export method, you can +arrange D-Bus methods calls to be directly translated to method calls on a Go +value. + +Conversion Rules + +For outgoing messages, Go types are automatically converted to the +corresponding D-Bus types. The following types are directly encoded as their +respective D-Bus equivalents: + + Go type | D-Bus type + ------------+----------- + byte | BYTE + bool | BOOLEAN + int16 | INT16 + uint16 | UINT16 + int32 | INT32 + uint32 | UINT32 + int64 | INT64 + uint64 | UINT64 + float64 | DOUBLE + string | STRING + ObjectPath | OBJECT_PATH + Signature | SIGNATURE + Variant | VARIANT + UnixFDIndex | UNIX_FD + +Slices and arrays encode as ARRAYs of their element type. 
+ +Maps encode as DICTs, provided that their key type can be used as a key for +a DICT. + +Structs other than Variant and Signature encode as a STRUCT containing their +exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will +be skipped. + +Pointers encode as the value they're pointed to. + +Trying to encode any other type or a slice, map or struct containing an +unsupported type will result in an InvalidTypeError. + +For incoming messages, the inverse of these rules are used, with the exception +of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces +containing the struct fields in the correct order. The Store function can be +used to convert such values to Go structs. + +Unix FD passing + +Handling Unix file descriptors deserves special mention. To use them, you should +first check that they are supported on a connection by calling SupportsUnixFDs. +If it returns true, all method of Connection will translate messages containing +UnixFD's to messages that are accompanied by the given file descriptors with the +UnixFD values being substituted by the correct indices. Similarily, the indices +of incoming messages are automatically resolved. It shouldn't be necessary to use +UnixFDIndex. + +*/ +package dbus diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go b/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go new file mode 100644 index 00000000000..f9d2f057160 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go @@ -0,0 +1,179 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io" + "reflect" +) + +// An encoder encodes values to the D-Bus wire format. +type encoder struct { + out io.Writer + order binary.ByteOrder + pos int +} + +// NewEncoder returns a new encoder that writes to out in the given byte order. +func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { + enc := new(encoder) + enc.out = out + enc.order = order + return enc +} + +// Aligns the next output to be on a multiple of n. Panics on write errors. +func (enc *encoder) align(n int) { + if enc.pos%n != 0 { + newpos := (enc.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-enc.pos) + if _, err := enc.out.Write(empty); err != nil { + panic(err) + } + enc.pos = newpos + } +} + +// Calls binary.Write(enc.out, enc.order, v) and panics on write errors. +func (enc *encoder) binwrite(v interface{}) { + if err := binary.Write(enc.out, enc.order, v); err != nil { + panic(err) + } +} + +// Encode encodes the given values to the underyling reader. All written values +// are aligned properly as required by the D-Bus spec. +func (enc *encoder) Encode(vs ...interface{}) (err error) { + defer func() { + err, _ = recover().(error) + }() + for _, v := range vs { + enc.encode(reflect.ValueOf(v), 0) + } + return nil +} + +// encode encodes the given value to the writer and panics on error. depth holds +// the depth of the container nesting. 
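+// Editor's illustrative sketch (not part of the upstream godbus source): per
+// the conversion rules in doc.go, a struct like this encodes as a D-Bus STRUCT
+// of STRING and UINT32; the tagged and unexported fields are skipped. The type
+// name wireUser is hypothetical.
+type wireUser struct {
+	Name    string
+	ID      uint32
+	Comment string `dbus:"-"` // skipped: tagged with dbus:"-"
+	note    string            // skipped: unexported
+}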
+func (enc *encoder) encode(v reflect.Value, depth int) { + enc.align(alignment(v.Type())) + switch v.Kind() { + case reflect.Uint8: + var b [1]byte + b[0] = byte(v.Uint()) + if _, err := enc.out.Write(b[:]); err != nil { + panic(err) + } + enc.pos++ + case reflect.Bool: + if v.Bool() { + enc.encode(reflect.ValueOf(uint32(1)), depth) + } else { + enc.encode(reflect.ValueOf(uint32(0)), depth) + } + case reflect.Int16: + enc.binwrite(int16(v.Int())) + enc.pos += 2 + case reflect.Uint16: + enc.binwrite(uint16(v.Uint())) + enc.pos += 2 + case reflect.Int32: + enc.binwrite(int32(v.Int())) + enc.pos += 4 + case reflect.Uint32: + enc.binwrite(uint32(v.Uint())) + enc.pos += 4 + case reflect.Int64: + enc.binwrite(v.Int()) + enc.pos += 8 + case reflect.Uint64: + enc.binwrite(v.Uint()) + enc.pos += 8 + case reflect.Float64: + enc.binwrite(v.Float()) + enc.pos += 8 + case reflect.String: + enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + b := make([]byte, v.Len()+1) + copy(b, v.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case reflect.Ptr: + enc.encode(v.Elem(), depth) + case reflect.Slice, reflect.Array: + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + + for i := 0; i < v.Len(); i++ { + bufenc.encode(v.Index(i), depth+1) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(alignment(v.Type().Elem())) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Struct: + if depth >= 64 && v.Type() != signatureType { + panic(FormatError("input exceeds container depth limit")) + } + switch t := v.Type(); t { + case signatureType: + str := v.Field(0) + enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + b := make([]byte, str.Len()+1) + copy(b, str.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case variantType: + variant := v.Interface().(Variant) + enc.encode(reflect.ValueOf(variant.sig), depth+1) + enc.encode(reflect.ValueOf(variant.value), depth+1) + default: + for i := 0; i < v.Type().NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + enc.encode(v.Field(i), depth+1) + } + } + } + case reflect.Map: + // Maps are arrays of structures, so they actually increase the depth by + // 2. 
+ if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + keys := v.MapKeys() + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + for _, k := range keys { + bufenc.align(8) + bufenc.encode(k, depth+2) + bufenc.encode(v.MapIndex(k), depth+2) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(8) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + default: + panic(InvalidTypeError{v.Type()}) + } +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/examples_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/examples_test.go new file mode 100644 index 00000000000..0218ac55986 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/examples_test.go @@ -0,0 +1,50 @@ +package dbus + +import "fmt" + +func ExampleConn_Emit() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + conn.Emit("/foo/bar", "foo.bar.Baz", uint32(0xDAEDBEEF)) +} + +func ExampleObject_Call() { + var list []string + + conn, err := SessionBus() + if err != nil { + panic(err) + } + + err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&list) + if err != nil { + panic(err) + } + for _, v := range list { + fmt.Println(v) + } +} + +func ExampleObject_Go() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + ch := make(chan *Call, 10) + conn.BusObject().Go("org.freedesktop.DBus.ListActivatableNames", 0, ch) + select { + case call := <-ch: + if call.Err != nil { + panic(err) + } + list := call.Body[0].([]string) + for _, v := range list { + fmt.Println(v) + } + // put some other cases here + } +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/export.go b/Godeps/_workspace/src/github.com/godbus/dbus/export.go new file mode 100644 index 00000000000..1dd15915280 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/export.go @@ -0,0 +1,302 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" + "unicode" +) + +var ( + errmsgInvalidArg = Error{ + "org.freedesktop.DBus.Error.InvalidArgs", + []interface{}{"Invalid type / number of args"}, + } + errmsgNoObject = Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{"No such object"}, + } + errmsgUnknownMethod = Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{"Unknown / invalid method"}, + } +) + +// Sender is a type which can be used in exported methods to receive the message +// sender. +type Sender string + +func exportedMethod(v interface{}, name string) reflect.Value { + if v == nil { + return reflect.Value{} + } + m := reflect.ValueOf(v).MethodByName(name) + if !m.IsValid() { + return reflect.Value{} + } + t := m.Type() + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + + return reflect.Value{} + } + return m +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). 
+func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(errmsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 || unicode.IsLower([]rune(name)[0]) { + conn.sendError(errmsgUnknownMethod, sender, serial) + } + var m reflect.Value + if hasIface { + conn.handlersLck.RLock() + obj, ok := conn.handlers[path] + if !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + iface := obj[ifaceName] + conn.handlersLck.RUnlock() + m = exportedMethod(iface, name) + } else { + conn.handlersLck.RLock() + if _, ok := conn.handlers[path]; !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + for _, v := range conn.handlers[path] { + m = exportedMethod(v, name) + if m.IsValid() { + break + } + } + conn.handlersLck.RUnlock() + } + if !m.IsValid() { + conn.sendError(errmsgUnknownMethod, sender, serial) + return + } + t := m.Type() + vs := msg.Body + pointers := make([]interface{}, t.NumIn()) + decode := make([]interface{}, 0, len(vs)) + for i := 0; i < t.NumIn(); i++ { + tp := t.In(i) + val := reflect.New(tp) + pointers[i] = val.Interface() + if tp == reflect.TypeOf((*Sender)(nil)).Elem() { + val.Elem().SetString(sender) + } else { + decode = append(decode, pointers[i]) + } + } + if len(decode) != len(vs) { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + if err := Store(vs, decode...); err != nil { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + params := make([]reflect.Value, len(pointers)) + for i := 0; i < len(pointers); i++ { + params[i] = reflect.ValueOf(pointers[i]).Elem() + } + ret := m.Call(params) + if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { + conn.sendError(*em, sender, serial) + return + } + if msg.Flags&FlagNoReplyExpected == 0 { + reply := new(Message) + reply.Type = TypeMethodReply + reply.serial = conn.getSerial() + reply.Headers = make(map[HeaderField]Variant) + if hasSender { + reply.Headers[FieldDestination] = msg.Headers[FieldSender] + } + reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) + reply.Body = make([]interface{}, len(ret)-1) + for i := 0; i < len(ret)-1; i++ { + reply.Body[i] = ret[i].Interface() + } + if len(ret) != 1 { + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- reply + } + conn.outLck.RUnlock() + } +} + +// Emit emits the given signal on the message bus. The name parameter must be +// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". 
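+// Editor's illustrative sketch (not part of the upstream godbus source): an
+// exportable method of the shape handleCall above expects. The last return
+// value is *Error, and the Sender parameter receives the caller's unique name
+// without contributing to the D-Bus signature. The type and method names are
+// hypothetical.
+type pinger struct{}
+
+func (pinger) Ping(sender Sender, msg string) (string, *Error) {
+	return "pong for " + string(sender) + ": " + msg, nil
+}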
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { + if !path.IsValid() { + return errors.New("dbus: invalid object path") + } + i := strings.LastIndex(name, ".") + if i == -1 { + return errors.New("dbus: invalid method name") + } + iface := name[:i] + member := name[i+1:] + if !isValidMember(member) { + return errors.New("dbus: invalid method name") + } + if !isValidInterface(iface) { + return errors.New("dbus: invalid interface name") + } + msg := new(Message) + msg.Type = TypeSignal + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldInterface] = MakeVariant(iface) + msg.Headers[FieldMember] = MakeVariant(member) + msg.Headers[FieldPath] = MakeVariant(path) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + defer conn.outLck.RUnlock() + if conn.closed { + return ErrClosed + } + conn.out <- msg + return nil +} + +// Export registers the given value to be exported as an object on the +// message bus. +// +// If a method call on the given path and interface is received, an exported +// method with the same name is called with v as the receiver if the +// parameters match and the last return value is of type *Error. If this +// *Error is not nil, it is sent back to the caller as an error. +// Otherwise, a method reply is sent with the other return values as its body. +// +// Any parameters with the special type Sender are set to the sender of the +// dbus message when the method is called. Parameters of this type do not +// contribute to the dbus signature of the method (i.e. the method is exposed +// as if the parameters of type Sender were not there). +// +// Every method call is executed in a new goroutine, so the method may be called +// in multiple goroutines at once. +// +// Method calls on the interface org.freedesktop.DBus.Peer will be automatically +// handled for every object. +// +// Passing nil as the first parameter will cause conn to cease handling calls on +// the given combination of path and interface. +// +// Export returns an error if path is not a valid path name. +func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { + if !path.IsValid() { + return errors.New("dbus: invalid path name") + } + conn.handlersLck.Lock() + if v == nil { + if _, ok := conn.handlers[path]; ok { + delete(conn.handlers[path], iface) + if len(conn.handlers[path]) == 0 { + delete(conn.handlers, path) + } + } + return nil + } + if _, ok := conn.handlers[path]; !ok { + conn.handlers[path] = make(map[string]interface{}) + } + conn.handlers[path][iface] = v + conn.handlersLck.Unlock() + return nil +} + +// ReleaseName calls org.freedesktop.DBus.ReleaseName. You should use only this +// method to release a name (see below). +func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(ReleaseNameReplyReleased) { + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + return ReleaseNameReply(r), nil +} + +// RequestName calls org.freedesktop.DBus.RequestName. You should use only this +// method to request a name because package dbus needs to keep track of all +// names that the connection has. 
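+// Editor's illustrative sketch (not part of the upstream godbus source):
+// exporting an object and then claiming a well-known name with RequestName, as
+// described above. The object path and names are hypothetical.
+func exportExample(conn *Conn, v interface{}) error {
+	if err := conn.Export(v, "/com/example/Obj", "com.example.Iface"); err != nil {
+		return err
+	}
+	reply, err := conn.RequestName("com.example.Iface", NameFlagDoNotQueue)
+	if err != nil {
+		return err
+	}
+	if reply != RequestNameReplyPrimaryOwner {
+		return errors.New("dbus: name already taken")
+	}
+	return nil
+}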
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(RequestNameReplyPrimaryOwner) { + conn.namesLck.Lock() + conn.names = append(conn.names, name) + conn.namesLck.Unlock() + } + return RequestNameReply(r), nil +} + +// ReleaseNameReply is the reply to a ReleaseName call. +type ReleaseNameReply uint32 + +const ( + ReleaseNameReplyReleased ReleaseNameReply = 1 + iota + ReleaseNameReplyNonExistent + ReleaseNameReplyNotOwner +) + +// RequestNameFlags represents the possible flags for a RequestName call. +type RequestNameFlags uint32 + +const ( + NameFlagAllowReplacement RequestNameFlags = 1 << iota + NameFlagReplaceExisting + NameFlagDoNotQueue +) + +// RequestNameReply is the reply to a RequestName call. +type RequestNameReply uint32 + +const ( + RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota + RequestNameReplyInQueue + RequestNameReplyExists + RequestNameReplyAlreadyOwner +) diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go new file mode 100644 index 00000000000..0b745f9313a --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go @@ -0,0 +1,28 @@ +package dbus + +import ( + "os" + "sync" +) + +var ( + homeDir string + homeDirLock sync.Mutex +) + +func getHomeDir() string { + homeDirLock.Lock() + defer homeDirLock.Unlock() + + if homeDir != "" { + return homeDir + } + + homeDir = os.Getenv("HOME") + if homeDir != "" { + return homeDir + } + + homeDir = lookupHomeDir() + return homeDir +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go new file mode 100644 index 00000000000..2732081e73b --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go @@ -0,0 +1,15 @@ +// +build !static_build + +package dbus + +import ( + "os/user" +) + +func lookupHomeDir() string { + u, err := user.Current() + if err != nil { + return "/" + } + return u.HomeDir +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go new file mode 100644 index 00000000000..b9d9cb5525a --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go @@ -0,0 +1,45 @@ +// +build static_build + +package dbus + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +func lookupHomeDir() string { + myUid := os.Getuid() + + f, err := os.Open("/etc/passwd") + if err != nil { + return "/" + } + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + if err := s.Err(); err != nil { + break + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + + if len(parts) >= 6 { + uid, err := strconv.Atoi(parts[2]) + if err == nil && uid == myUid { + return parts[5] + } + } + } + + // Default to / if we can't get a better value + return "/" +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go new file mode 100644 index 00000000000..4aca2ea63e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go @@ -0,0 +1,27 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "strings" +) + +// Call calls 
org.freedesktop.Introspectable.Introspect on a remote object +// and returns the introspection data. +func Call(o *dbus.Object) (*Node, error) { + var xmldata string + var node Node + + err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata) + if err != nil { + return nil, err + } + err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node) + if err != nil { + return nil, err + } + if node.Name == "" { + node.Name = string(o.Path()) + } + return &node, nil +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go new file mode 100644 index 00000000000..b06c3f1cf2d --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go @@ -0,0 +1,86 @@ +// Package introspect provides some utilities for dealing with the DBus +// introspection format. +package introspect + +import "encoding/xml" + +// The introspection data for the org.freedesktop.DBus.Introspectable interface. +var IntrospectData = Interface{ + Name: "org.freedesktop.DBus.Introspectable", + Methods: []Method{ + { + Name: "Introspect", + Args: []Arg{ + {"out", "s", "out"}, + }, + }, + }, +} + +// XML document type declaration of the introspection format version 1.0 +const IntrospectDeclarationString = ` + +` + +// The introspection data for the org.freedesktop.DBus.Introspectable interface, +// as a string. +const IntrospectDataString = ` + + + + + +` + +// Node is the root element of an introspection. +type Node struct { + XMLName xml.Name `xml:"node"` + Name string `xml:"name,attr,omitempty"` + Interfaces []Interface `xml:"interface"` + Children []Node `xml:"node,omitempty"` +} + +// Interface describes a DBus interface that is available on the message bus. +type Interface struct { + Name string `xml:"name,attr"` + Methods []Method `xml:"method"` + Signals []Signal `xml:"signal"` + Properties []Property `xml:"property"` + Annotations []Annotation `xml:"annotation"` +} + +// Method describes a Method on an Interface as retured by an introspection. +type Method struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Signal describes a Signal emitted on an Interface. +type Signal struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Property describes a property of an Interface. +type Property struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Access string `xml:"access,attr"` + Annotations []Annotation `xml:"annotation"` +} + +// Arg represents an argument of a method or a signal. +type Arg struct { + Name string `xml:"name,attr,omitempty"` + Type string `xml:"type,attr"` + Direction string `xml:"direction,attr,omitempty"` +} + +// Annotation is an annotation in the introspection format. +type Annotation struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go new file mode 100644 index 00000000000..08ce158744c --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go @@ -0,0 +1,75 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "reflect" + "strings" +) + +// Introspectable implements org.freedesktop.Introspectable. 
+// +// You can create it by converting the XML-formatted introspection data from a +// string to an Introspectable or call NewIntrospectable with a Node. Then, +// export it as org.freedesktop.Introspectable on you object. +type Introspectable string + +// NewIntrospectable returns an Introspectable that returns the introspection +// data that corresponds to the given Node. If n.Interfaces doesn't contain the +// data for org.freedesktop.DBus.Introspectable, it is added automatically. +func NewIntrospectable(n *Node) Introspectable { + found := false + for _, v := range n.Interfaces { + if v.Name == "org.freedesktop.DBus.Introspectable" { + found = true + break + } + } + if !found { + n.Interfaces = append(n.Interfaces, IntrospectData) + } + b, err := xml.Marshal(n) + if err != nil { + panic(err) + } + return Introspectable(strings.TrimSpace(IntrospectDeclarationString) + string(b)) +} + +// Introspect implements org.freedesktop.Introspectable.Introspect. +func (i Introspectable) Introspect() (string, *dbus.Error) { + return string(i), nil +} + +// Methods returns the description of the methods of v. This can be used to +// create a Node which can be passed to NewIntrospectable. +func Methods(v interface{}) []Method { + t := reflect.TypeOf(v) + ms := make([]Method, 0, t.NumMethod()) + for i := 0; i < t.NumMethod(); i++ { + if t.Method(i).PkgPath != "" { + continue + } + mt := t.Method(i).Type + if mt.NumOut() == 0 || + mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{"", nil}) { + + continue + } + var m Method + m.Name = t.Method(i).Name + m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2) + for j := 1; j < mt.NumIn(); j++ { + if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() { + arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"} + m.Args = append(m.Args, arg) + } + } + for j := 0; j < mt.NumOut()-1; j++ { + arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"} + m.Args = append(m.Args, arg) + } + m.Annotations = make([]Annotation, 0) + ms = append(ms, m) + } + return ms +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/message.go b/Godeps/_workspace/src/github.com/godbus/dbus/message.go new file mode 100644 index 00000000000..075d6e38bae --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/message.go @@ -0,0 +1,346 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" + "strconv" +) + +const protoVersion byte = 1 + +// Flags represents the possible flags of a D-Bus message. +type Flags byte + +const ( + // FlagNoReplyExpected signals that the message is not expected to generate + // a reply. If this flag is set on outgoing messages, any possible reply + // will be discarded. + FlagNoReplyExpected Flags = 1 << iota + // FlagNoAutoStart signals that the message bus should not automatically + // start an application when handling this message. + FlagNoAutoStart +) + +// Type represents the possible types of a D-Bus message. +type Type byte + +const ( + TypeMethodCall Type = 1 + iota + TypeMethodReply + TypeError + TypeSignal + typeMax +) + +func (t Type) String() string { + switch t { + case TypeMethodCall: + return "method call" + case TypeMethodReply: + return "reply" + case TypeError: + return "error" + case TypeSignal: + return "signal" + } + return "invalid" +} + +// HeaderField represents the possible byte codes for the headers +// of a D-Bus message. 
+type HeaderField byte + +const ( + FieldPath HeaderField = 1 + iota + FieldInterface + FieldMember + FieldErrorName + FieldReplySerial + FieldDestination + FieldSender + FieldSignature + FieldUnixFDs + fieldMax +) + +// An InvalidMessageError describes the reason why a D-Bus message is regarded as +// invalid. +type InvalidMessageError string + +func (e InvalidMessageError) Error() string { + return "dbus: invalid message: " + string(e) +} + +// fieldType are the types of the various header fields. +var fieldTypes = [fieldMax]reflect.Type{ + FieldPath: objectPathType, + FieldInterface: stringType, + FieldMember: stringType, + FieldErrorName: stringType, + FieldReplySerial: uint32Type, + FieldDestination: stringType, + FieldSender: stringType, + FieldSignature: signatureType, + FieldUnixFDs: uint32Type, +} + +// requiredFields lists the header fields that are required by the different +// message types. +var requiredFields = [typeMax][]HeaderField{ + TypeMethodCall: {FieldPath, FieldMember}, + TypeMethodReply: {FieldReplySerial}, + TypeError: {FieldErrorName, FieldReplySerial}, + TypeSignal: {FieldPath, FieldInterface, FieldMember}, +} + +// Message represents a single D-Bus message. +type Message struct { + Type + Flags + Headers map[HeaderField]Variant + Body []interface{} + + serial uint32 +} + +type header struct { + Field byte + Variant +} + +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. +func DecodeMessage(rd io.Reader) (msg *Message, err error) { + var order binary.ByteOrder + var hlength, length uint32 + var typ, flags, proto byte + var headers []header + + b := make([]byte, 1) + _, err = rd.Read(b) + if err != nil { + return + } + switch b[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + + dec := newDecoder(rd, order) + dec.pos = 1 + + msg = new(Message) + vs, err := dec.Decode(Signature{"yyyuu"}) + if err != nil { + return nil, err + } + if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { + return nil, err + } + msg.Type = Type(typ) + msg.Flags = Flags(flags) + + // get the header length separately because we need it later + b = make([]byte, 4) + _, err = io.ReadFull(rd, b) + if err != nil { + return nil, err + } + binary.Read(bytes.NewBuffer(b), order, &hlength) + if hlength+length+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec.pos = 12 + vs, err = dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + if err = Store(vs, &headers); err != nil { + return nil, err + } + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, int(length)) + if length != 0 { + _, err := io.ReadFull(rd, body) + if err != nil { + return nil, err + } + } + + if err = msg.IsValid(); err != nil { + return nil, err + } + sig, _ := msg.Headers[FieldSignature].value.(Signature) + if sig.str != "" { + buf := bytes.NewBuffer(body) + dec = newDecoder(buf, order) + vs, err := dec.Decode(sig) + if err != nil { + return nil, err + } + msg.Body = vs + } + + return +} + +// EncodeTo encodes and sends a message to the given writer. 
The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. +func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { + if err := msg.IsValid(); err != nil { + return err + } + var vs [7]interface{} + switch order { + case binary.LittleEndian: + vs[0] = byte('l') + case binary.BigEndian: + vs[0] = byte('B') + default: + return errors.New("dbus: invalid byte order") + } + body := new(bytes.Buffer) + enc := newEncoder(body, order) + if len(msg.Body) != 0 { + enc.Encode(msg.Body...) + } + vs[1] = msg.Type + vs[2] = msg.Flags + vs[3] = protoVersion + vs[4] = uint32(len(body.Bytes())) + vs[5] = msg.serial + headers := make([]header, 0, len(msg.Headers)) + for k, v := range msg.Headers { + headers = append(headers, header{byte(k), v}) + } + vs[6] = headers + var buf bytes.Buffer + enc = newEncoder(&buf, order) + enc.Encode(vs[:]...) + enc.align(8) + body.WriteTo(&buf) + if buf.Len() > 1<<27 { + return InvalidMessageError("message is too long") + } + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// IsValid checks whether msg is a valid message and returns an +// InvalidMessageError if it is not. +func (msg *Message) IsValid() error { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 { + return InvalidMessageError("invalid flags") + } + if msg.Type == 0 || msg.Type >= typeMax { + return InvalidMessageError("invalid message type") + } + for k, v := range msg.Headers { + if k == 0 || k >= fieldMax { + return InvalidMessageError("invalid header") + } + if reflect.TypeOf(v.value) != fieldTypes[k] { + return InvalidMessageError("invalid type of header field") + } + } + for _, v := range requiredFields[msg.Type] { + if _, ok := msg.Headers[v]; !ok { + return InvalidMessageError("missing required header") + } + } + if path, ok := msg.Headers[FieldPath]; ok { + if !path.value.(ObjectPath).IsValid() { + return InvalidMessageError("invalid path name") + } + } + if iface, ok := msg.Headers[FieldInterface]; ok { + if !isValidInterface(iface.value.(string)) { + return InvalidMessageError("invalid interface name") + } + } + if member, ok := msg.Headers[FieldMember]; ok { + if !isValidMember(member.value.(string)) { + return InvalidMessageError("invalid member name") + } + } + if errname, ok := msg.Headers[FieldErrorName]; ok { + if !isValidInterface(errname.value.(string)) { + return InvalidMessageError("invalid error name") + } + } + if len(msg.Body) != 0 { + if _, ok := msg.Headers[FieldSignature]; !ok { + return InvalidMessageError("missing signature") + } + } + return nil +} + +// Serial returns the message's serial number. The returned value is only valid +// for messages received by eavesdropping. +func (msg *Message) Serial() uint32 { + return msg.serial +} + +// String returns a string representation of a message similar to the format of +// dbus-monitor. 
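+// Editor's illustrative sketch (not part of the upstream godbus source):
+// encoding a signal message to a buffer with EncodeTo and decoding it back
+// with DecodeMessage. The path and names are hypothetical; serials are
+// normally assigned by a Conn.
+func roundTripExample() (*Message, error) {
+	msg := &Message{
+		Type: TypeSignal,
+		Headers: map[HeaderField]Variant{
+			FieldPath:      MakeVariant(ObjectPath("/com/example/Obj")),
+			FieldInterface: MakeVariant("com.example.Iface"),
+			FieldMember:    MakeVariant("Changed"),
+		},
+	}
+	var buf bytes.Buffer
+	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
+		return nil, err
+	}
+	return DecodeMessage(&buf)
+}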
+func (msg *Message) String() string { + if err := msg.IsValid(); err != nil { + return "" + } + s := msg.Type.String() + if v, ok := msg.Headers[FieldSender]; ok { + s += " from " + v.value.(string) + } + if v, ok := msg.Headers[FieldDestination]; ok { + s += " to " + v.value.(string) + } + s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) + if v, ok := msg.Headers[FieldReplySerial]; ok { + s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldUnixFDs]; ok { + s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldPath]; ok { + s += " path " + string(v.value.(ObjectPath)) + } + if v, ok := msg.Headers[FieldInterface]; ok { + s += " interface " + v.value.(string) + } + if v, ok := msg.Headers[FieldErrorName]; ok { + s += " error " + v.value.(string) + } + if v, ok := msg.Headers[FieldMember]; ok { + s += " member " + v.value.(string) + } + if len(msg.Body) != 0 { + s += "\n" + } + for i, v := range msg.Body { + s += " " + MakeVariant(v).String() + if i != len(msg.Body)-1 { + s += "\n" + } + } + return s +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go b/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go new file mode 100644 index 00000000000..ed5bdf2243c --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go @@ -0,0 +1,264 @@ +// Package prop provides the Properties struct which can be used to implement +// org.freedesktop.DBus.Properties. +package prop + +import ( + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "sync" +) + +// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is +// emitted for a property. If it is EmitTrue, the signal is emitted. If it is +// EmitInvalidates, the signal is also emitted, but the new value of the property +// is not disclosed. +type EmitType byte + +const ( + EmitFalse EmitType = iota + EmitTrue + EmitInvalidates +) + +// ErrIfaceNotFound is the error returned to peers who try to access properties +// on interfaces that aren't found. +var ErrIfaceNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil} + +// ErrPropNotFound is the error returned to peers trying to access properties +// that aren't found. +var ErrPropNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil} + +// ErrReadOnly is the error returned to peers trying to set a read-only +// property. +var ErrReadOnly = &dbus.Error{"org.freedesktop.DBus.Properties.Error.ReadOnly", nil} + +// ErrInvalidArg is returned to peers if the type of the property that is being +// changed and the argument don't match. +var ErrInvalidArg = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InvalidArg", nil} + +// The introspection data for the org.freedesktop.DBus.Properties interface. 
+var IntrospectData = introspect.Interface{ + Name: "org.freedesktop.DBus.Properties", + Methods: []introspect.Method{ + { + Name: "Get", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "out"}, + }, + }, + { + Name: "GetAll", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"props", "a{sv}", "out"}, + }, + }, + { + Name: "Set", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "in"}, + }, + }, + }, + Signals: []introspect.Signal{ + { + Name: "PropertiesChanged", + Args: []introspect.Arg{ + {"interface", "s", "out"}, + {"changed_properties", "a{sv}", "out"}, + {"invalidates_properties", "as", "out"}, + }, + }, + }, +} + +// The introspection data for the org.freedesktop.DBus.Properties interface, as +// a string. +const IntrospectDataString = ` + + + + + + + + + + + + + + + + + + + + + +` + +// Prop represents a single property. It is used for creating a Properties +// value. +type Prop struct { + // Initial value. Must be a DBus-representable type. + Value interface{} + + // If true, the value can be modified by calls to Set. + Writable bool + + // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is + // emitted if this property changes. + Emit EmitType + + // If not nil, anytime this property is changed by Set, this function is + // called with an appropiate Change as its argument. If the returned error + // is not nil, it is sent back to the caller of Set and the property is not + // changed. + Callback func(*Change) *dbus.Error +} + +// Change represents a change of a property by a call to Set. +type Change struct { + Props *Properties + Iface string + Name string + Value interface{} +} + +// Properties is a set of values that can be made available to the message bus +// using the org.freedesktop.DBus.Properties interface. It is safe for +// concurrent use by multiple goroutines. +type Properties struct { + m map[string]map[string]*Prop + mut sync.RWMutex + conn *dbus.Conn + path dbus.ObjectPath +} + +// New returns a new Properties structure that manages the given properties. +// The key for the first-level map of props is the name of the interface; the +// second-level key is the name of the property. The returned structure will be +// exported as org.freedesktop.DBus.Properties on path. +func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties { + p := &Properties{m: props, conn: conn, path: path} + conn.Export(p, path, "org.freedesktop.DBus.Properties") + return p +} + +// Get implements org.freedesktop.DBus.Properties.Get. +func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return dbus.Variant{}, ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return dbus.Variant{}, ErrPropNotFound + } + return dbus.MakeVariant(prop.Value), nil +} + +// GetAll implements org.freedesktop.DBus.Properties.GetAll. +func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return nil, ErrIfaceNotFound + } + rm := make(map[string]dbus.Variant, len(m)) + for k, v := range m { + rm[k] = dbus.MakeVariant(v.Value) + } + return rm, nil +} + +// GetMust returns the value of the given property and panics if either the +// interface or the property name are invalid. 
+func (p *Properties) GetMust(iface, property string) interface{} { + p.mut.RLock() + defer p.mut.RUnlock() + return p.m[iface][property].Value +} + +// Introspection returns the introspection data that represents the properties +// of iface. +func (p *Properties) Introspection(iface string) []introspect.Property { + p.mut.RLock() + defer p.mut.RUnlock() + m := p.m[iface] + s := make([]introspect.Property, 0, len(m)) + for k, v := range m { + p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()} + if v.Writable { + p.Access = "readwrite" + } else { + p.Access = "read" + } + s = append(s, p) + } + return s +} + +// set sets the given property and emits PropertyChanged if appropiate. p.mut +// must already be locked. +func (p *Properties) set(iface, property string, v interface{}) { + prop := p.m[iface][property] + prop.Value = v + switch prop.Emit { + case EmitFalse: + // do nothing + case EmitInvalidates: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{}, []string{property}) + case EmitTrue: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)}, + []string{}) + default: + panic("invalid value for EmitType") + } +} + +// Set implements org.freedesktop.Properties.Set. +func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error { + p.mut.Lock() + defer p.mut.Unlock() + m, ok := p.m[iface] + if !ok { + return ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return ErrPropNotFound + } + if !prop.Writable { + return ErrReadOnly + } + if newv.Signature() != dbus.SignatureOf(prop.Value) { + return ErrInvalidArg + } + if prop.Callback != nil { + err := prop.Callback(&Change{p, iface, property, newv.Value()}) + if err != nil { + return err + } + } + p.set(iface, property, newv.Value()) + return nil +} + +// SetMust sets the value of the given property and panics if the interface or +// the property name are invalid. 
+func (p *Properties) SetMust(iface, property string, v interface{}) { + p.mut.Lock() + p.set(iface, property, v) + p.mut.Unlock() +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/proto_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/proto_test.go new file mode 100644 index 00000000000..608a770d41f --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/proto_test.go @@ -0,0 +1,369 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io/ioutil" + "math" + "reflect" + "testing" +) + +var protoTests = []struct { + vs []interface{} + bigEndian []byte + littleEndian []byte +}{ + { + []interface{}{int32(0)}, + []byte{0, 0, 0, 0}, + []byte{0, 0, 0, 0}, + }, + { + []interface{}{true, false}, + []byte{0, 0, 0, 1, 0, 0, 0, 0}, + []byte{1, 0, 0, 0, 0, 0, 0, 0}, + }, + { + []interface{}{byte(0), uint16(12), int16(32), uint32(43)}, + []byte{0, 0, 0, 12, 0, 32, 0, 0, 0, 0, 0, 43}, + []byte{0, 0, 12, 0, 32, 0, 0, 0, 43, 0, 0, 0}, + }, + { + []interface{}{int64(-1), uint64(1<<64 - 1)}, + bytes.Repeat([]byte{255}, 16), + bytes.Repeat([]byte{255}, 16), + }, + { + []interface{}{math.Inf(+1)}, + []byte{0x7f, 0xf0, 0, 0, 0, 0, 0, 0}, + []byte{0, 0, 0, 0, 0, 0, 0xf0, 0x7f}, + }, + { + []interface{}{"foo"}, + []byte{0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{Signature{"ai"}}, + []byte{2, 'a', 'i', 0}, + []byte{2, 'a', 'i', 0}, + }, + { + []interface{}{[]int16{42, 256}}, + []byte{0, 0, 0, 4, 0, 42, 1, 0}, + []byte{4, 0, 0, 0, 42, 0, 0, 1}, + }, + { + []interface{}{MakeVariant("foo")}, + []byte{1, 's', 0, 0, 0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{1, 's', 0, 0, 3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{MakeVariant(MakeVariant(Signature{"v"}))}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + }, + { + []interface{}{map[int32]bool{42: true}}, + []byte{0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1}, + []byte{8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1, 0, 0, 0}, + }, + { + []interface{}{map[string]Variant{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, + { + []interface{}{[]uint64{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, +} + +func TestProto(t *testing.T) { + for i, v := range protoTests { + buf := new(bytes.Buffer) + bigEnc := newEncoder(buf, binary.BigEndian) + bigEnc.Encode(v.vs...) + marshalled := buf.Bytes() + if bytes.Compare(marshalled, v.bigEndian) != 0 { + t.Errorf("test %d (marshal be): got '%v', but expected '%v'\n", i+1, marshalled, + v.bigEndian) + } + buf.Reset() + litEnc := newEncoder(buf, binary.LittleEndian) + litEnc.Encode(v.vs...) 
+ marshalled = buf.Bytes() + if bytes.Compare(marshalled, v.littleEndian) != 0 { + t.Errorf("test %d (marshal le): got '%v', but expected '%v'\n", i+1, marshalled, + v.littleEndian) + } + unmarshalled := reflect.MakeSlice(reflect.TypeOf(v.vs), + 0, 0) + for i := range v.vs { + unmarshalled = reflect.Append(unmarshalled, + reflect.New(reflect.TypeOf(v.vs[i]))) + } + bigDec := newDecoder(bytes.NewReader(v.bigEndian), binary.BigEndian) + vs, err := bigDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal be): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal be): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + litDec := newDecoder(bytes.NewReader(v.littleEndian), binary.LittleEndian) + vs, err = litDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal le): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal le): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + + } +} + +func TestProtoMap(t *testing.T) { + m := map[string]uint8{ + "foo": 23, + "bar": 2, + } + var n map[string]uint8 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(m) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"a{sy}"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &n); err != nil { + t.Fatal(err) + } + if len(n) != 2 || n["foo"] != 23 || n["bar"] != 2 { + t.Error("got", n) + } +} + +func TestProtoVariantStruct(t *testing.T) { + var variant Variant + v := MakeVariant(struct { + A int32 + B int16 + }{1, 2}) + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(v) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"v"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &variant); err != nil { + t.Fatal(err) + } + sl := variant.Value().([]interface{}) + v1, v2 := sl[0].(int32), sl[1].(int16) + if v1 != int32(1) { + t.Error("got", v1, "as first int") + } + if v2 != int16(2) { + t.Error("got", v2, "as second int") + } +} + +func TestProtoStructTag(t *testing.T) { + type Bar struct { + A int32 + B chan interface{} `dbus:"-"` + C int32 + } + var bar1, bar2 Bar + bar1.A = 234 + bar2.C = 345 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(bar1) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"(ii)"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &bar2); err != nil { + t.Fatal(err) + } + if bar1 != bar2 { + t.Error("struct tag test: got", bar2) + } +} + +func TestProtoStoreStruct(t *testing.T) { + var foo struct { + A int32 + B string + c chan interface{} + D interface{} `dbus:"-"` + } + src := []interface{}{[]interface{}{int32(42), "foo"}} + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestProtoStoreNestedStruct(t *testing.T) { + var foo struct { + A int32 + B struct { + C string + D float64 + } + } + src := []interface{}{ + []interface{}{ + int32(42), + []interface{}{ + "foo", + 3.14, + }, + }, + } + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestMessage(t *testing.T) { + buf := new(bytes.Buffer) + message := new(Message) + message.Type = TypeMethodCall + message.serial = 32 + message.Headers = map[HeaderField]Variant{ + FieldPath: MakeVariant(ObjectPath("/org/foo/bar")), + FieldMember: MakeVariant("baz"), + } + message.Body = make([]interface{}, 0) + err := message.EncodeTo(buf, 
binary.LittleEndian) + if err != nil { + t.Error(err) + } + _, err = DecodeMessage(buf) + if err != nil { + t.Error(err) + } +} + +func TestProtoStructInterfaces(t *testing.T) { + b := []byte{42} + vs, err := newDecoder(bytes.NewReader(b), binary.LittleEndian).Decode(Signature{"(y)"}) + if err != nil { + t.Fatal(err) + } + if vs[0].([]interface{})[0].(byte) != 42 { + t.Errorf("wrongs results (got %v)", vs) + } +} + +// ordinary org.freedesktop.DBus.Hello call +var smallMessage = &Message{ + Type: TypeMethodCall, + serial: 1, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.DBus"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: MakeVariant("org.freedesktop.DBus"), + FieldMember: MakeVariant("Hello"), + }, +} + +// org.freedesktop.Notifications.Notify +var bigMessage = &Message{ + Type: TypeMethodCall, + serial: 2, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.Notifications"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/Notifications")), + FieldInterface: MakeVariant("org.freedesktop.Notifications"), + FieldMember: MakeVariant("Notify"), + FieldSignature: MakeVariant(Signature{"susssasa{sv}i"}), + }, + Body: []interface{}{ + "app_name", + uint32(0), + "dialog-information", + "Notification", + "This is the body of a notification", + []string{"ok", "Ok"}, + map[string]Variant{ + "sound-name": MakeVariant("dialog-information"), + }, + int32(-1), + }, +} + +func BenchmarkDecodeMessageSmall(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = smallMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeMessageBig(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = bigMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageSmall(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = smallMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageBig(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = bigMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/sig.go b/Godeps/_workspace/src/github.com/godbus/dbus/sig.go new file mode 100644 index 00000000000..f45b53ce1b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/sig.go @@ -0,0 +1,257 @@ +package dbus + +import ( + "fmt" + "reflect" + "strings" +) + +var sigToType = map[byte]reflect.Type{ + 'y': byteType, + 'b': boolType, + 'n': int16Type, + 'q': uint16Type, + 'i': int32Type, + 'u': uint32Type, + 'x': int64Type, + 't': uint64Type, + 'd': float64Type, + 's': stringType, + 'g': signatureType, + 'o': objectPathType, + 'v': variantType, + 'h': unixFDIndexType, +} + +// Signature represents a correct type signature as specified by the D-Bus +// specification. The zero value represents the empty signature, "". 
+type Signature struct { + str string +} + +// SignatureOf returns the concatenation of all the signatures of the given +// values. It panics if one of them is not representable in D-Bus. +func SignatureOf(vs ...interface{}) Signature { + var s string + for _, v := range vs { + s += getSignature(reflect.TypeOf(v)) + } + return Signature{s} +} + +// SignatureOfType returns the signature of the given type. It panics if the +// type is not representable in D-Bus. +func SignatureOfType(t reflect.Type) Signature { + return Signature{getSignature(t)} +} + +// getSignature returns the signature of the given type and panics on unknown types. +func getSignature(t reflect.Type) string { + // handle simple types first + switch t.Kind() { + case reflect.Uint8: + return "y" + case reflect.Bool: + return "b" + case reflect.Int16: + return "n" + case reflect.Uint16: + return "q" + case reflect.Int32: + if t == unixFDType { + return "h" + } + return "i" + case reflect.Uint32: + if t == unixFDIndexType { + return "h" + } + return "u" + case reflect.Int64: + return "x" + case reflect.Uint64: + return "t" + case reflect.Float64: + return "d" + case reflect.Ptr: + return getSignature(t.Elem()) + case reflect.String: + if t == objectPathType { + return "o" + } + return "s" + case reflect.Struct: + if t == variantType { + return "v" + } else if t == signatureType { + return "g" + } + var s string + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + s += getSignature(t.Field(i).Type) + } + } + return "(" + s + ")" + case reflect.Array, reflect.Slice: + return "a" + getSignature(t.Elem()) + case reflect.Map: + if !isKeyType(t.Key()) { + panic(InvalidTypeError{t}) + } + return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + } + panic(InvalidTypeError{t}) +} + +// ParseSignature returns the signature represented by this string, or a +// SignatureError if the string is not a valid signature. +func ParseSignature(s string) (sig Signature, err error) { + if len(s) == 0 { + return + } + if len(s) > 255 { + return Signature{""}, SignatureError{s, "too long"} + } + sig.str = s + for err == nil && len(s) != 0 { + err, s = validSingle(s, 0) + } + if err != nil { + sig = Signature{""} + } + + return +} + +// ParseSignatureMust behaves like ParseSignature, except that it panics if s +// is not valid. +func ParseSignatureMust(s string) Signature { + sig, err := ParseSignature(s) + if err != nil { + panic(err) + } + return sig +} + +// Empty retruns whether the signature is the empty signature. +func (s Signature) Empty() bool { + return s.str == "" +} + +// Single returns whether the signature represents a single, complete type. +func (s Signature) Single() bool { + err, r := validSingle(s.str, 0) + return err != nil && r == "" +} + +// String returns the signature's string representation. +func (s Signature) String() string { + return s.str +} + +// A SignatureError indicates that a signature passed to a function or received +// on a connection is not a valid signature. +type SignatureError struct { + Sig string + Reason string +} + +func (e SignatureError) Error() string { + return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) +} + +// Try to read a single type from this string. If it was successfull, err is nil +// and rem is the remaining unparsed part. Otherwise, err is a non-nil +// SignatureError and rem is "". 
depth is the current recursion depth which may +// not be greater than 64 and should be given as 0 on the first call. +func validSingle(s string, depth int) (err error, rem string) { + if s == "" { + return SignatureError{Sig: s, Reason: "empty signature"}, "" + } + if depth > 64 { + return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" + } + switch s[0] { + case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h': + return nil, s[1:] + case 'a': + if len(s) > 1 && s[1] == '{' { + i := findMatching(s[1:], '{', '}') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched '{'"}, "" + } + i++ + rem = s[i+1:] + s = s[2:i] + if err, _ = validSingle(s[:1], depth+1); err != nil { + return err, "" + } + err, nr := validSingle(s[1:], depth+1) + if err != nil { + return err, "" + } + if nr != "" { + return SignatureError{Sig: s, Reason: "too many types in dict"}, "" + } + return nil, rem + } + return validSingle(s[1:], depth+1) + case '(': + i := findMatching(s, '(', ')') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched ')'"}, "" + } + rem = s[i+1:] + s = s[1:i] + for err == nil && s != "" { + err, s = validSingle(s, depth+1) + } + if err != nil { + rem = "" + } + return + } + return SignatureError{Sig: s, Reason: "invalid type character"}, "" +} + +func findMatching(s string, left, right rune) int { + n := 0 + for i, v := range s { + if v == left { + n++ + } else if v == right { + n-- + } + if n == 0 { + return i + } + } + return -1 +} + +// typeFor returns the type of the given signature. It ignores any left over +// characters and panics if s doesn't start with a valid type signature. +func typeFor(s string) (t reflect.Type) { + err, _ := validSingle(s, 0) + if err != nil { + panic(err) + } + + if t, ok := sigToType[s[0]]; ok { + return t + } + switch s[0] { + case 'a': + if s[1] == '{' { + i := strings.LastIndex(s, "}") + t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) + } else { + t = reflect.SliceOf(typeFor(s[1:])) + } + case '(': + t = interfacesType + } + return +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/sig_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/sig_test.go new file mode 100644 index 00000000000..da37bc968e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/sig_test.go @@ -0,0 +1,70 @@ +package dbus + +import ( + "testing" +) + +var sigTests = []struct { + vs []interface{} + sig Signature +}{ + { + []interface{}{new(int32)}, + Signature{"i"}, + }, + { + []interface{}{new(string)}, + Signature{"s"}, + }, + { + []interface{}{new(Signature)}, + Signature{"g"}, + }, + { + []interface{}{new([]int16)}, + Signature{"an"}, + }, + { + []interface{}{new(int16), new(uint32)}, + Signature{"nu"}, + }, + { + []interface{}{new(map[byte]Variant)}, + Signature{"a{yv}"}, + }, + { + []interface{}{new(Variant), new([]map[int32]string)}, + Signature{"vaa{is}"}, + }, +} + +func TestSig(t *testing.T) { + for i, v := range sigTests { + sig := SignatureOf(v.vs...) + if sig != v.sig { + t.Errorf("test %d: got %q, expected %q", i+1, sig.str, v.sig.str) + } + } +} + +var getSigTest = []interface{}{ + []struct { + b byte + i int32 + t uint64 + s string + }{}, + map[string]Variant{}, +} + +func BenchmarkGetSignatureSimple(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf("", int32(0)) + } +} + +func BenchmarkGetSignatureLong(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf(getSigTest...) 
+ } +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go new file mode 100644 index 00000000000..1bba0d6bf78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go new file mode 100644 index 00000000000..46f8f49d699 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go @@ -0,0 +1,35 @@ +package dbus + +import ( + "encoding/binary" + "errors" + "io" +) + +type genericTransport struct { + io.ReadWriteCloser +} + +func (t genericTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} + +func (t genericTransport) SupportsUnixFDs() bool { + return false +} + +func (t genericTransport) EnableUnixFDs() {} + +func (t genericTransport) ReadMessage() (*Message, error) { + return DecodeMessage(t) +} + +func (t genericTransport) SendMessage(msg *Message) error { + for _, v := range msg.Body { + if _, ok := v.(UnixFD); ok { + return errors.New("dbus: unix fd passing not enabled") + } + } + return msg.EncodeTo(t, binary.LittleEndian) +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go new file mode 100644 index 00000000000..3fafeabb15b --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go @@ -0,0 +1,196 @@ +//+build !windows + +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "net" + "syscall" +) + +type oobReader struct { + conn *net.UnixConn + oob []byte + buf [4096]byte +} + +func (o *oobReader) Read(b []byte) (n int, err error) { + n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) + if err != nil { + return n, err + } + if flags&syscall.MSG_CTRUNC != 0 { + return n, errors.New("dbus: control data truncated (too many fds received)") + } + o.oob = append(o.oob, o.buf[:oobn]...) 
+ return n, nil +} + +type unixTransport struct { + *net.UnixConn + hasUnixFDs bool +} + +func newUnixTransport(keys string) (transport, error) { + var err error + + t := new(unixTransport) + abstract := getKey(keys, "abstract") + path := getKey(keys, "path") + switch { + case abstract == "" && path == "": + return nil, errors.New("dbus: invalid address (neither path nor abstract set)") + case abstract != "" && path == "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + case abstract == "" && path != "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + default: + return nil, errors.New("dbus: invalid address (both path and abstract set)") + } +} + +func init() { + transports["unix"] = newUnixTransport +} + +func (t *unixTransport) EnableUnixFDs() { + t.hasUnixFDs = true +} + +func (t *unixTransport) ReadMessage() (*Message, error) { + var ( + blen, hlen uint32 + csheader [16]byte + headers []header + order binary.ByteOrder + unixfds uint32 + ) + // To be sure that all bytes of out-of-band data are read, we use a special + // reader that uses ReadUnix on the underlying connection instead of Read + // and gathers the out-of-band data in a buffer. + rd := &oobReader{conn: t.UnixConn} + // read the first 16 bytes (the part of the header that has a constant size), + // from which we can figure out the length of the rest of the message + if _, err := io.ReadFull(rd, csheader[:]); err != nil { + return nil, err + } + switch csheader[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + // csheader[4:8] -> length of message body, csheader[12:16] -> length of + // header fields (without alignment) + binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) + binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + if hlen%8 != 0 { + hlen += 8 - (hlen % 8) + } + + // decode headers and look for unix fds + headerdata := make([]byte, hlen+4) + copy(headerdata, csheader[12:]) + if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + return nil, err + } + dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec.pos = 12 + vs, err := dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + Store(vs, &headers) + for _, v := range headers { + if v.Field == byte(FieldUnixFDs) { + unixfds, _ = v.Variant.value.(uint32) + } + } + all := make([]byte, 16+hlen+blen) + copy(all, csheader[:]) + copy(all[16:], headerdata[4:]) + if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + return nil, err + } + if unixfds != 0 { + if !t.hasUnixFDs { + return nil, errors.New("dbus: got unix fds on unsupported transport") + } + // read the fds from the OOB data + scms, err := syscall.ParseSocketControlMessage(rd.oob) + if err != nil { + return nil, err + } + if len(scms) != 1 { + return nil, errors.New("dbus: received more than one socket control message") + } + fds, err := syscall.ParseUnixRights(&scms[0]) + if err != nil { + return nil, err + } + msg, err := DecodeMessage(bytes.NewBuffer(all)) + if err != nil { + return nil, err + } + // substitute the values in the message body (which are indices for the + // array receiver via OOB) with the actual values + for i, v := range msg.Body { + if j, ok := v.(UnixFDIndex); ok { + if uint32(j) >= unixfds { + return nil, InvalidMessageError("invalid 
index for unix fd") + } + msg.Body[i] = UnixFD(fds[j]) + } + } + return msg, nil + } + return DecodeMessage(bytes.NewBuffer(all)) +} + +func (t *unixTransport) SendMessage(msg *Message) error { + fds := make([]int, 0) + for i, v := range msg.Body { + if fd, ok := v.(UnixFD); ok { + msg.Body[i] = UnixFDIndex(len(fds)) + fds = append(fds, int(fd)) + } + } + if len(fds) != 0 { + if !t.hasUnixFDs { + return errors.New("dbus: unix fd passing not enabled") + } + msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) + oob := syscall.UnixRights(fds...) + buf := new(bytes.Buffer) + msg.EncodeTo(buf, binary.LittleEndian) + n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) + if err != nil { + return err + } + if n != buf.Len() || oobn != len(oob) { + return io.ErrShortWrite + } + } else { + if err := msg.EncodeTo(t, binary.LittleEndian); err != nil { + return nil + } + } + return nil +} + +func (t *unixTransport) SupportsUnixFDs() bool { + return true +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix_test.go new file mode 100644 index 00000000000..302233fc65e --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix_test.go @@ -0,0 +1,49 @@ +package dbus + +import ( + "os" + "testing" +) + +const testString = `This is a test! +This text should be read from the file that is created by this test.` + +type unixFDTest struct{} + +func (t unixFDTest) Test(fd UnixFD) (string, *Error) { + var b [4096]byte + file := os.NewFile(uintptr(fd), "testfile") + defer file.Close() + n, err := file.Read(b[:]) + if err != nil { + return "", &Error{"com.github.guelfey.test.Error", nil} + } + return string(b[:n]), nil +} + +func TestUnixFDs(t *testing.T) { + conn, err := SessionBus() + if err != nil { + t.Fatal(err) + } + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer w.Close() + if _, err := w.Write([]byte(testString)); err != nil { + t.Fatal(err) + } + name := conn.Names()[0] + test := unixFDTest{} + conn.Export(test, "/com/github/guelfey/test", "com.github.guelfey.test") + var s string + obj := conn.Object(name, "/com/github/guelfey/test") + err = obj.Call("com.github.guelfey.test.Test", 0, UnixFD(r.Fd())).Store(&s) + if err != nil { + t.Fatal(err) + } + if s != testString { + t.Fatal("got", s, "wanted", testString) + } +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go new file mode 100644 index 00000000000..a8cd39395f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go @@ -0,0 +1,95 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +// Local implementation of the UnixCredentials system call for DragonFly BSD + +package dbus + +/* +#include +*/ +import "C" + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go +// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +// http://golang.org/src/pkg/syscall/types_linux.go +// http://golang.org/src/pkg/syscall/types_dragonfly.go +// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h +const ( + SizeofUcred = 
C.sizeof_struct_ucred +) + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgAlignOf(salen int) int { + // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go + //salign := sizeofPtr + // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels + // still require 32-bit aligned access to network subsystem. + //if darwin64Bit || dragonfly64Bit { + // salign = 4 + //} + salign := 4 + return (salen + salign - 1) & ^(salign - 1) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr))) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, syscall.CmsgSpace(SizeofUcred)) + h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = syscall.SOL_SOCKET + h.Type = syscall.SCM_CREDS + h.SetLen(syscall.CmsgLen(SizeofUcred)) + *((*Ucred)(cmsgData(h))) = *ucred + return b +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { + if m.Header.Level != syscall.SOL_SOCKET { + return nil, syscall.EINVAL + } + if m.Header.Type != syscall.SCM_CREDS { + return nil, syscall.EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +func (t *unixTransport) SendNullByte() error { + ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go new file mode 100644 index 00000000000..d9dfdf69821 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go @@ -0,0 +1,25 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +package dbus + +import ( + "io" + "os" + "syscall" +) + +func (t *unixTransport) SendNullByte() error { + ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := syscall.UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant.go new file mode 100644 index 00000000000..b1b53ceb472 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant.go @@ -0,0 +1,129 @@ +package dbus + +import ( + "bytes" + "fmt" + "reflect" + "strconv" +) + +// Variant represents the D-Bus variant type. 
+type Variant struct { + sig Signature + value interface{} +} + +// MakeVariant converts the given value to a Variant. It panics if v cannot be +// represented as a D-Bus type. +func MakeVariant(v interface{}) Variant { + return Variant{SignatureOf(v), v} +} + +// ParseVariant parses the given string as a variant as described at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not +// empty, it is taken to be the expected signature for the variant. +func ParseVariant(s string, sig Signature) (Variant, error) { + tokens := varLex(s) + p := &varParser{tokens: tokens} + n, err := varMakeNode(p) + if err != nil { + return Variant{}, err + } + if sig.str == "" { + sig, err = varInfer(n) + if err != nil { + return Variant{}, err + } + } + v, err := n.Value(sig) + if err != nil { + return Variant{}, err + } + return MakeVariant(v), nil +} + +// format returns a formatted version of v and whether this string can be parsed +// unambigously. +func (v Variant) format() (string, bool) { + switch v.sig.str[0] { + case 'b', 'i': + return fmt.Sprint(v.value), true + case 'n', 'q', 'u', 'x', 't', 'd', 'h': + return fmt.Sprint(v.value), false + case 's': + return strconv.Quote(v.value.(string)), true + case 'o': + return strconv.Quote(string(v.value.(ObjectPath))), false + case 'g': + return strconv.Quote(v.value.(Signature).str), false + case 'v': + s, unamb := v.value.(Variant).format() + if !unamb { + return "<@" + v.value.(Variant).sig.str + " " + s + ">", true + } + return "<" + s + ">", true + case 'y': + return fmt.Sprintf("%#x", v.value.(byte)), false + } + rv := reflect.ValueOf(v.value) + switch rv.Kind() { + case reflect.Slice: + if rv.Len() == 0 { + return "[]", false + } + unamb := true + buf := bytes.NewBuffer([]byte("[")) + for i := 0; i < rv.Len(); i++ { + // TODO: slooow + s, b := MakeVariant(rv.Index(i).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String(), unamb + case reflect.Map: + if rv.Len() == 0 { + return "{}", false + } + unamb := true + buf := bytes.NewBuffer([]byte("{")) + for i, k := range rv.MapKeys() { + s, b := MakeVariant(k.Interface()).format() + unamb = unamb && b + buf.WriteString(s) + buf.WriteString(": ") + s, b = MakeVariant(rv.MapIndex(k).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte('}') + return buf.String(), unamb + } + return `"INVALID"`, true +} + +// Signature returns the D-Bus signature of the underlying value of v. +func (v Variant) Signature() Signature { + return v.sig +} + +// String returns the string representation of the underlying value of v as +// described at https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (v Variant) String() string { + s, unamb := v.format() + if !unamb { + return "@" + v.sig.str + " " + s + } + return s +} + +// Value returns the underlying value of v. +func (v Variant) Value() interface{} { + return v.value +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go new file mode 100644 index 00000000000..332007d6f12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go @@ -0,0 +1,284 @@ +package dbus + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// Heavily inspired by the lexer from text/template. 
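
Illustrative note (not part of the vendored godbus/dbus sources in this diff): the lexer and parser that follow back the exported text-variant API shown above (ParseVariant, Variant.Signature, Variant.String, Variant.Value). The sketch below shows a hypothetical round trip through that API, assuming the package is imported as "github.com/godbus/dbus"; the expected values in the comments follow the inference rules visible in variant_parser.go (bare integers infer "i", quoted strings infer "s") and are expectations, not guarantees.

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Parse a GVariant-style text literal. With an empty Signature the
	// parser infers the type from the literal itself.
	v, err := dbus.ParseVariant(`{"answer": 42, "year": 1969}`, dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Signature().String()) // expected: a{si}
	fmt.Println(v.String())             // text form, dbus-monitor style

	// Value() yields the Go value corresponding to the inferred signature.
	m := v.Value().(map[string]int32)
	fmt.Println(m["answer"]) // expected: 42
}
```
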
+ +type varToken struct { + typ varTokenType + val string +} + +type varTokenType byte + +const ( + tokEOF varTokenType = iota + tokError + tokNumber + tokString + tokBool + tokArrayStart + tokArrayEnd + tokDictStart + tokDictEnd + tokVariantStart + tokVariantEnd + tokComma + tokColon + tokType + tokByteString +) + +type varLexer struct { + input string + start int + pos int + width int + tokens []varToken +} + +type lexState func(*varLexer) lexState + +func varLex(s string) []varToken { + l := &varLexer{input: s} + l.run() + return l.tokens +} + +func (l *varLexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *varLexer) backup() { + l.pos -= l.width +} + +func (l *varLexer) emit(t varTokenType) { + l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) + l.start = l.pos +} + +func (l *varLexer) errorf(format string, v ...interface{}) lexState { + l.tokens = append(l.tokens, varToken{ + tokError, + fmt.Sprintf(format, v...), + }) + return nil +} + +func (l *varLexer) ignore() { + l.start = l.pos +} + +func (l *varLexer) next() rune { + var r rune + + if l.pos >= len(l.input) { + l.width = 0 + return -1 + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *varLexer) run() { + for state := varLexNormal; state != nil; { + state = state(l) + } +} + +func (l *varLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func varLexNormal(l *varLexer) lexState { + for { + r := l.next() + switch { + case r == -1: + l.emit(tokEOF) + return nil + case r == '[': + l.emit(tokArrayStart) + case r == ']': + l.emit(tokArrayEnd) + case r == '{': + l.emit(tokDictStart) + case r == '}': + l.emit(tokDictEnd) + case r == '<': + l.emit(tokVariantStart) + case r == '>': + l.emit(tokVariantEnd) + case r == ':': + l.emit(tokColon) + case r == ',': + l.emit(tokComma) + case r == '\'' || r == '"': + l.backup() + return varLexString + case r == '@': + l.backup() + return varLexType + case unicode.IsSpace(r): + l.ignore() + case unicode.IsNumber(r) || r == '+' || r == '-': + l.backup() + return varLexNumber + case r == 'b': + pos := l.start + if n := l.peek(); n == '"' || n == '\'' { + return varLexByteString + } + // not a byte string; try to parse it as a type or bool below + l.pos = pos + 1 + l.width = 1 + fallthrough + default: + // either a bool or a type. Try bools first. + l.backup() + if l.pos+4 <= len(l.input) { + if l.input[l.pos:l.pos+4] == "true" { + l.pos += 4 + l.emit(tokBool) + continue + } + } + if l.pos+5 <= len(l.input) { + if l.input[l.pos:l.pos+5] == "false" { + l.pos += 5 + l.emit(tokBool) + continue + } + } + // must be a type. 
+ return varLexType + } + } +} + +var varTypeMap = map[string]string{ + "boolean": "b", + "byte": "y", + "int16": "n", + "uint16": "q", + "int32": "i", + "uint32": "u", + "int64": "x", + "uint64": "t", + "double": "f", + "string": "s", + "objectpath": "o", + "signature": "g", +} + +func varLexByteString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated bytestring") + case q: + break Loop + } + } + l.emit(tokByteString) + return varLexNormal +} + +func varLexNumber(l *varLexer) lexState { + l.accept("+-") + digits := "0123456789" + if l.accept("0") { + if l.accept("x") { + digits = "0123456789abcdefABCDEF" + } else { + digits = "01234567" + } + } + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + if l.accept(".") { + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + } + if l.accept("eE") { + l.accept("+-") + for strings.IndexRune("0123456789", l.next()) >= 0 { + } + l.backup() + } + if r := l.peek(); unicode.IsLetter(r) { + l.next() + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(tokNumber) + return varLexNormal +} + +func varLexString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated string") + case q: + break Loop + } + } + l.emit(tokString) + return varLexNormal +} + +func varLexType(l *varLexer) lexState { + at := l.accept("@") + for { + r := l.next() + if r == -1 { + break + } + if unicode.IsSpace(r) { + l.backup() + break + } + } + if at { + if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { + return l.errorf("%s", err) + } + } else { + if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { + l.emit(tokType) + return varLexNormal + } + return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) + } + l.emit(tokType) + return varLexNormal +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go new file mode 100644 index 00000000000..d20f5da6dd2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go @@ -0,0 +1,817 @@ +package dbus + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type varParser struct { + tokens []varToken + i int +} + +func (p *varParser) backup() { + p.i-- +} + +func (p *varParser) next() varToken { + if p.i < len(p.tokens) { + t := p.tokens[p.i] + p.i++ + return t + } + return varToken{typ: tokEOF} +} + +type varNode interface { + Infer() (Signature, error) + String() string + Sigs() sigSet + Value(Signature) (interface{}, error) +} + +func varMakeNode(p *varParser) (varNode, error) { + var sig Signature + + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokNumber: + return varMakeNumNode(t, sig) + case tokString: + return varMakeStringNode(t, sig) + case tokBool: + if sig.str != "" && sig.str != "b" { + return nil, varTypeError{t.val, sig} + } + b, err := strconv.ParseBool(t.val) + if err != nil { + return nil, err + } + return boolNode(b), nil + case tokArrayStart: + return varMakeArrayNode(p, sig) + case tokVariantStart: + return varMakeVariantNode(p, sig) + case tokDictStart: + return varMakeDictNode(p, sig) + case tokType: + if sig.str != "" { 
+ return nil, errors.New("unexpected type annotation") + } + if t.val[0] == '@' { + sig.str = t.val[1:] + } else { + sig.str = varTypeMap[t.val] + } + case tokByteString: + if sig.str != "" && sig.str != "ay" { + return nil, varTypeError{t.val, sig} + } + b, err := varParseByteString(t.val) + if err != nil { + return nil, err + } + return byteStringNode(b), nil + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } +} + +type varTypeError struct { + val string + sig Signature +} + +func (e varTypeError) Error() string { + return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) +} + +type sigSet map[Signature]bool + +func (s sigSet) Empty() bool { + return len(s) == 0 +} + +func (s sigSet) Intersect(s2 sigSet) sigSet { + r := make(sigSet) + for k := range s { + if s2[k] { + r[k] = true + } + } + return r +} + +func (s sigSet) Single() (Signature, bool) { + if len(s) == 1 { + for k := range s { + return k, true + } + } + return Signature{}, false +} + +func (s sigSet) ToArray() sigSet { + r := make(sigSet, len(s)) + for k := range s { + r[Signature{"a" + k.str}] = true + } + return r +} + +type numNode struct { + sig Signature + str string + val interface{} +} + +var numSigSet = sigSet{ + Signature{"y"}: true, + Signature{"n"}: true, + Signature{"q"}: true, + Signature{"i"}: true, + Signature{"u"}: true, + Signature{"x"}: true, + Signature{"t"}: true, + Signature{"d"}: true, +} + +func (n numNode) Infer() (Signature, error) { + if strings.ContainsAny(n.str, ".e") { + return Signature{"d"}, nil + } + return Signature{"i"}, nil +} + +func (n numNode) String() string { + return n.str +} + +func (n numNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + if strings.ContainsAny(n.str, ".e") { + return sigSet{Signature{"d"}: true} + } + return numSigSet +} + +func (n numNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + return varNumAs(n.str, sig) +} + +func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { + if sig.str == "" { + return numNode{str: tok.val}, nil + } + num, err := varNumAs(tok.val, sig) + if err != nil { + return nil, err + } + return numNode{sig: sig, val: num}, nil +} + +func varNumAs(s string, sig Signature) (interface{}, error) { + isUnsigned := false + size := 32 + switch sig.str { + case "n": + size = 16 + case "i": + case "x": + size = 64 + case "y": + size = 8 + isUnsigned = true + case "q": + size = 16 + isUnsigned = true + case "u": + isUnsigned = true + case "t": + size = 64 + isUnsigned = true + case "d": + d, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + return d, nil + default: + return nil, varTypeError{s, sig} + } + base := 10 + if strings.HasPrefix(s, "0x") { + base = 16 + s = s[2:] + } + if strings.HasPrefix(s, "0") && len(s) != 1 { + base = 8 + s = s[1:] + } + if isUnsigned { + i, err := strconv.ParseUint(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "y": + v = byte(i) + case "q": + v = uint16(i) + case "u": + v = uint32(i) + } + return v, nil + } + i, err := strconv.ParseInt(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "n": + v = int16(i) + case "i": + v = int32(i) + } + return v, nil +} + +type stringNode struct { + sig Signature + str string // parsed + val interface{} // has correct type +} + +var stringSigSet = sigSet{ + 
Signature{"s"}: true, + Signature{"g"}: true, + Signature{"o"}: true, +} + +func (n stringNode) Infer() (Signature, error) { + return Signature{"s"}, nil +} + +func (n stringNode) String() string { + return n.str +} + +func (n stringNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + return stringSigSet +} + +func (n stringNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + switch { + case sig.str == "g": + return Signature{n.str}, nil + case sig.str == "o": + return ObjectPath(n.str), nil + case sig.str == "s": + return n.str, nil + default: + return nil, varTypeError{n.str, sig} + } +} + +func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { + if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { + return nil, fmt.Errorf("invalid type %q for string", sig.str) + } + s, err := varParseString(tok.val) + if err != nil { + return nil, err + } + n := stringNode{str: s} + if sig.str == "" { + return stringNode{str: s}, nil + } + n.sig = sig + switch sig.str { + case "o": + n.val = ObjectPath(s) + case "g": + n.val = Signature{s} + case "s": + n.val = s + } + return n, nil +} + +func varParseString(s string) (string, error) { + // quotes are guaranteed to be there + s = s[1 : len(s)-1] + buf := new(bytes.Buffer) + for len(s) != 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + if r != '\\' { + buf.WriteRune(r) + continue + } + r, size = utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + switch r { + case 'a': + buf.WriteRune(0x7) + case 'b': + buf.WriteRune(0x8) + case 'f': + buf.WriteRune(0xc) + case 'n': + buf.WriteRune('\n') + case 'r': + buf.WriteRune('\r') + case 't': + buf.WriteRune('\t') + case '\n': + case 'u': + if len(s) < 4 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:4], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[4:] + case 'U': + if len(s) < 8 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:8], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[8:] + default: + buf.WriteRune(r) + } + } + return buf.String(), nil +} + +var boolSigSet = sigSet{Signature{"b"}: true} + +type boolNode bool + +func (boolNode) Infer() (Signature, error) { + return Signature{"b"}, nil +} + +func (b boolNode) String() string { + if b { + return "true" + } + return "false" +} + +func (boolNode) Sigs() sigSet { + return boolSigSet +} + +func (b boolNode) Value(sig Signature) (interface{}, error) { + if sig.str != "b" { + return nil, varTypeError{b.String(), sig} + } + return bool(b), nil +} + +type arrayNode struct { + set sigSet + children []varNode + val interface{} +} + +func (n arrayNode) Infer() (Signature, error) { + for _, v := range n.children { + csig, err := varInfer(v) + if err != nil { + continue + } + return Signature{"a" + csig.str}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n arrayNode) String() string { + s := "[" + for i, v := range n.children { + s += v.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "]" +} + +func (n arrayNode) Sigs() sigSet { + return n.set +} + +func (n arrayNode) Value(sig Signature) (interface{}, 
error) { + if n.set.Empty() { + // no type information whatsoever, so this must be an empty slice + return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil + } + if !n.set[sig] { + return nil, varTypeError{n.String(), sig} + } + s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) + for i, v := range n.children { + rv, err := v.Value(Signature{sig.str[1:]}) + if err != nil { + return nil, err + } + s.Index(i).Set(reflect.ValueOf(rv)) + } + return s.Interface(), nil +} + +func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { + var n arrayNode + if sig.str != "" { + n.set = sigSet{sig: true} + } + if t := p.next(); t.typ == tokArrayEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + cn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if cset := cn.Sigs(); !cset.Empty() { + if n.set.Empty() { + n.set = cset.ToArray() + } else { + nset := cset.ToArray().Intersect(n.set) + if nset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) + } + n.set = nset + } + } + n.children = append(n.children, cn) + switch t := p.next(); t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokArrayEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type variantNode struct { + n varNode +} + +var variantSet = sigSet{ + Signature{"v"}: true, +} + +func (variantNode) Infer() (Signature, error) { + return Signature{"v"}, nil +} + +func (n variantNode) String() string { + return "<" + n.n.String() + ">" +} + +func (variantNode) Sigs() sigSet { + return variantSet +} + +func (n variantNode) Value(sig Signature) (interface{}, error) { + if sig.str != "v" { + return nil, varTypeError{n.String(), sig} + } + sig, err := varInfer(n.n) + if err != nil { + return nil, err + } + v, err := n.n.Value(sig) + if err != nil { + return nil, err + } + return MakeVariant(v), nil +} + +func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { + n, err := varMakeNode(p) + if err != nil { + return nil, err + } + if t := p.next(); t.typ != tokVariantEnd { + return nil, fmt.Errorf("unexpected %q", t.val) + } + vn := variantNode{n} + if sig.str != "" && sig.str != "v" { + return nil, varTypeError{vn.String(), sig} + } + return variantNode{n}, nil +} + +type dictEntry struct { + key, val varNode +} + +type dictNode struct { + kset, vset sigSet + children []dictEntry + val interface{} +} + +func (n dictNode) Infer() (Signature, error) { + for _, v := range n.children { + ksig, err := varInfer(v.key) + if err != nil { + continue + } + vsig, err := varInfer(v.val) + if err != nil { + continue + } + return Signature{"a{" + ksig.str + vsig.str + "}"}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n dictNode) String() string { + s := "{" + for i, v := range n.children { + s += v.key.String() + ": " + v.val.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "}" +} + +func (n dictNode) Sigs() sigSet { + r := sigSet{} + for k := range n.kset { + for v := range n.vset { + sig := "a{" + k.str + v.str + "}" + r[Signature{sig}] = true + } + } + return r +} + +func (n dictNode) Value(sig Signature) (interface{}, error) { + set := n.Sigs() + if set.Empty() { + // no type 
information -> empty dict + return reflect.MakeMap(typeFor(sig.str)).Interface(), nil + } + if !set[sig] { + return nil, varTypeError{n.String(), sig} + } + m := reflect.MakeMap(typeFor(sig.str)) + ksig := Signature{sig.str[2:3]} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + for _, v := range n.children { + kv, err := v.key.Value(ksig) + if err != nil { + return nil, err + } + vv, err := v.val.Value(vsig) + if err != nil { + return nil, err + } + m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return m.Interface(), nil +} + +func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { + var n dictNode + + if sig.str != "" { + if len(sig.str) < 5 { + return nil, fmt.Errorf("invalid signature %q for dict type", sig) + } + ksig := Signature{string(sig.str[2])} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + n.kset = sigSet{ksig: true} + n.vset = sigSet{vsig: true} + } + if t := p.next(); t.typ == tokDictEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + kn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if kset := kn.Sigs(); !kset.Empty() { + if n.kset.Empty() { + n.kset = kset + } else { + n.kset = kset.Intersect(n.kset) + if n.kset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) + } + } + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokColon: + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + vn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if vset := vn.Sigs(); !vset.Empty() { + if n.vset.Empty() { + n.vset = vset + } else { + n.vset = n.vset.Intersect(vset) + if n.vset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) + } + } + } + n.children = append(n.children, dictEntry{kn, vn}) + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokDictEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type byteStringNode []byte + +var byteStringSet = sigSet{ + Signature{"ay"}: true, +} + +func (byteStringNode) Infer() (Signature, error) { + return Signature{"ay"}, nil +} + +func (b byteStringNode) String() string { + return string(b) +} + +func (b byteStringNode) Sigs() sigSet { + return byteStringSet +} + +func (b byteStringNode) Value(sig Signature) (interface{}, error) { + if sig.str != "ay" { + return nil, varTypeError{b.String(), sig} + } + return []byte(b), nil +} + +func varParseByteString(s string) ([]byte, error) { + // quotes and b at start are guaranteed to be there + b := make([]byte, 0, 1) + s = s[2 : len(s)-1] + for len(s) != 0 { + c := s[0] + s = s[1:] + if c != '\\' { + b = append(b, c) + continue + } + c = s[0] + s = s[1:] + switch c { + case 'a': + b = append(b, 0x7) + case 'b': + b = append(b, 0x8) + case 'f': + b = append(b, 0xc) + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = append(b, '\t') + case 'x': + if len(s) < 2 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:2], 16, 8) + 
if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[2:] + case '0': + if len(s) < 3 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:3], 8, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[3:] + default: + b = append(b, c) + } + } + return append(b, 0), nil +} + +func varInfer(n varNode) (Signature, error) { + if sig, ok := n.Sigs().Single(); ok { + return sig, nil + } + return n.Infer() +} diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant_test.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant_test.go new file mode 100644 index 00000000000..da917c8e290 --- /dev/null +++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant_test.go @@ -0,0 +1,78 @@ +package dbus + +import "reflect" +import "testing" + +var variantFormatTests = []struct { + v interface{} + s string +}{ + {int32(1), `1`}, + {"foo", `"foo"`}, + {ObjectPath("/org/foo"), `@o "/org/foo"`}, + {Signature{"i"}, `@g "i"`}, + {[]byte{}, `@ay []`}, + {[]int32{1, 2}, `[1, 2]`}, + {[]int64{1, 2}, `@ax [1, 2]`}, + {[][]int32{{3, 4}, {5, 6}}, `[[3, 4], [5, 6]]`}, + {[]Variant{MakeVariant(int32(1)), MakeVariant(1.0)}, `[<1>, <@d 1>]`}, + {map[string]int32{"one": 1, "two": 2}, `{"one": 1, "two": 2}`}, + {map[int32]ObjectPath{1: "/org/foo"}, `@a{io} {1: "/org/foo"}`}, + {map[string]Variant{}, `@a{sv} {}`}, +} + +func TestFormatVariant(t *testing.T) { + for i, v := range variantFormatTests { + if s := MakeVariant(v.v).String(); s != v.s { + t.Errorf("test %d: got %q, wanted %q", i+1, s, v.s) + } + } +} + +var variantParseTests = []struct { + s string + v interface{} +}{ + {"1", int32(1)}, + {"true", true}, + {"false", false}, + {"1.0", float64(1.0)}, + {"0x10", int32(16)}, + {"1e1", float64(10)}, + {`"foo"`, "foo"}, + {`"\a\b\f\n\r\t"`, "\x07\x08\x0c\n\r\t"}, + {`"\u00e4\U0001f603"`, "\u00e4\U0001f603"}, + {"[1]", []int32{1}}, + {"[1, 2, 3]", []int32{1, 2, 3}}, + {"@ai []", []int32{}}, + {"[1, 5.0]", []float64{1, 5.0}}, + {"[[1, 2], [3, 4.0]]", [][]float64{{1, 2}, {3, 4}}}, + {`[@o "/org/foo", "/org/bar"]`, []ObjectPath{"/org/foo", "/org/bar"}}, + {"<1>", MakeVariant(int32(1))}, + {"[<1>, <2.0>]", []Variant{MakeVariant(int32(1)), MakeVariant(2.0)}}, + {`[[], [""]]`, [][]string{{}, {""}}}, + {`@a{ss} {}`, map[string]string{}}, + {`{"foo": 1}`, map[string]int32{"foo": 1}}, + {`[{}, {"foo": "bar"}]`, []map[string]string{{}, {"foo": "bar"}}}, + {`{"a": <1>, "b": <"foo">}`, + map[string]Variant{"a": MakeVariant(int32(1)), "b": MakeVariant("foo")}}, + {`b''`, []byte{0}}, + {`b"abc"`, []byte{'a', 'b', 'c', 0}}, + {`b"\x01\0002\a\b\f\n\r\t"`, []byte{1, 2, 0x7, 0x8, 0xc, '\n', '\r', '\t', 0}}, + {`[[0], b""]`, [][]byte{{0}, {0}}}, + {"int16 0", int16(0)}, + {"byte 0", byte(0)}, +} + +func TestParseVariant(t *testing.T) { + for i, v := range variantParseTests { + nv, err := ParseVariant(v.s, Signature{}) + if err != nil { + t.Errorf("test %d: parsing failed: %s", i+1, err) + continue + } + if !reflect.DeepEqual(nv.value, v.v) { + t.Errorf("test %d: got %q, wanted %q", i+1, nv, v.v) + } + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go new file mode 100644 index 00000000000..b62e3e9661c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go @@ -0,0 +1,78 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package container defines types for sub-container events and also +// defines an interface for container operation handlers. +package container + +import info "github.com/google/cadvisor/info/v1" + +// ListType describes whether listing should be just for a +// specific container or performed recursively. +type ListType int + +const ( + ListSelf ListType = iota + ListRecursive +) + +// SubcontainerEventType indicates an addition or deletion event. +type SubcontainerEventType int + +const ( + SubcontainerAdd SubcontainerEventType = iota + SubcontainerDelete +) + +// SubcontainerEvent represents an addition or deletion of a subcontainer. +type SubcontainerEvent struct { + // The type of event that occurred. + EventType SubcontainerEventType + + // The full container name of the container where the event occurred. + Name string +} + +// Interface for container operation handlers. +type ContainerHandler interface { + // Returns the ContainerReference. + ContainerReference() (info.ContainerReference, error) + + // Returns container's isolation spec. + GetSpec() (info.ContainerSpec, error) + + // Returns the current stats values of the container. + GetStats() (*info.ContainerStats, error) + + // Returns the subcontainers of this container. + ListContainers(listType ListType) ([]info.ContainerReference, error) + + // Returns the threads inside this container. + ListThreads(listType ListType) ([]int, error) + + // Returns the processes inside this container. + ListProcesses(listType ListType) ([]int, error) + + // Registers a channel to listen for events affecting subcontainers (recursively). + WatchSubcontainers(events chan SubcontainerEvent) error + + // Stops watching for subcontainer changes. + StopWatchingSubcontainers() error + + // Returns absolute cgroup path for the requested resource. + GetCgroupPath(resource string) (string, error) + + // Returns whether the container still exists. + Exists() bool +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go new file mode 100644 index 00000000000..d7454e6049f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go @@ -0,0 +1,219 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package docker + +import ( + "flag" + "fmt" + "path" + "regexp" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/systemd" + "github.com/fsouza/go-dockerclient" + "github.com/golang/glog" + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/libcontainer" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" +) + +var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint") + +// The namespace under which Docker aliases are unique. +var DockerNamespace = "docker" + +// Basepath to all container specific information that libcontainer stores. +var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)") + +// Whether the system is using Systemd. +var useSystemd bool + +func init() { + useSystemd = systemd.UseSystemd() + if !useSystemd { + // Second attempt at checking for systemd, check for a "name=systemd" cgroup. + mnt, err := cgroups.FindCgroupMountpoint("cpu") + if err == nil { + // systemd presence does not mean systemd controls cgroups. + // If system.slice cgroup exists, then systemd is taking control. + // This breaks if user creates system.slice manually :) + useSystemd = utils.FileExists(mnt + "/system.slice") + } + } +} + +func UseSystemd() bool { + // init would run and initialize useSystemd before we can call this method. + return useSystemd +} + +type dockerFactory struct { + machineInfoFactory info.MachineInfoFactory + + // Whether docker is running with AUFS storage driver. + usesAufsDriver bool + + client *docker.Client + + // Information about the mounted cgroup subsystems. + cgroupSubsystems libcontainer.CgroupSubsystems +} + +func (self *dockerFactory) String() string { + return DockerNamespace +} + +func (self *dockerFactory) NewContainerHandler(name string) (handler container.ContainerHandler, err error) { + client, err := docker.NewClient(*ArgDockerEndpoint) + if err != nil { + return + } + handler, err = newDockerContainerHandler( + client, + name, + self.machineInfoFactory, + *dockerRootDir, + self.usesAufsDriver, + &self.cgroupSubsystems, + ) + return +} + +// Returns the Docker ID from the full container name. +func ContainerNameToDockerId(name string) string { + id := path.Base(name) + + // Turn systemd cgroup name into Docker ID. + if useSystemd { + id = strings.TrimPrefix(id, "docker-") + id = strings.TrimSuffix(id, ".scope") + } + + return id +} + +// Returns a full container name for the specified Docker ID. +func FullContainerName(dockerId string) string { + // Add the full container name. + if useSystemd { + return path.Join("/system.slice", fmt.Sprintf("docker-%s.scope", dockerId)) + } else { + return path.Join("/docker", dockerId) + } +} + +// Docker handles all containers under /docker +func (self *dockerFactory) CanHandle(name string) (bool, error) { + // Check if the container is known to docker and it is active. + id := ContainerNameToDockerId(name) + + // We assume that if Inspect fails then the container is not known to docker. 
+ ctnr, err := self.client.InspectContainer(id) + if err != nil || !ctnr.State.Running { + return false, fmt.Errorf("error inspecting container: %v", err) + } + + return true, nil +} + +func parseDockerVersion(full_version_string string) ([]int, error) { + version_regexp_string := "(\\d+)\\.(\\d+)\\.(\\d+)" + version_re := regexp.MustCompile(version_regexp_string) + matches := version_re.FindAllStringSubmatch(full_version_string, -1) + if len(matches) != 1 { + return nil, fmt.Errorf("version string \"%v\" doesn't match expected regular expression: \"%v\"", full_version_string, version_regexp_string) + } + version_string_array := matches[0][1:] + version_array := make([]int, 3) + for index, version_string := range version_string_array { + version, err := strconv.Atoi(version_string) + if err != nil { + return nil, fmt.Errorf("error while parsing \"%v\" in \"%v\"", version_string, full_version_string) + } + version_array[index] = version + } + return version_array, nil +} + +// Register root container before running this function! +func Register(factory info.MachineInfoFactory) error { + client, err := docker.NewClient(*ArgDockerEndpoint) + if err != nil { + return fmt.Errorf("unable to communicate with docker daemon: %v", err) + } + if version, err := client.Version(); err != nil { + return fmt.Errorf("unable to communicate with docker daemon: %v", err) + } else { + expected_version := []int{1, 0, 0} + version_string := version.Get("Version") + version, err := parseDockerVersion(version_string) + if err != nil { + return fmt.Errorf("couldn't parse docker version: %v", err) + } + for index, number := range version { + if number > expected_version[index] { + break + } else if number < expected_version[index] { + return fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as \"%v\"", expected_version, version, version_string) + } + } + } + + // Check that the libcontainer execdriver is used. + information, err := client.Info() + if err != nil { + return fmt.Errorf("failed to detect Docker info: %v", err) + } + usesNativeDriver := false + for _, val := range *information { + if strings.Contains(val, "ExecutionDriver=") && strings.Contains(val, "native") { + usesNativeDriver = true + break + } + } + if !usesNativeDriver { + return fmt.Errorf("docker found, but not using native exec driver") + } + + usesAufsDriver := false + for _, val := range *information { + if strings.Contains(val, "Driver=") && strings.Contains(val, "aufs") { + usesAufsDriver = true + break + } + } + + if useSystemd { + glog.Infof("System is using systemd") + } + + cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() + if err != nil { + return fmt.Errorf("failed to get cgroup subsystems: %v", err) + } + + glog.Infof("Registering Docker factory") + f := &dockerFactory{ + machineInfoFactory: factory, + client: client, + usesAufsDriver: usesAufsDriver, + cgroupSubsystems: cgroupSubsystems, + } + container.RegisterContainerHandlerFactory(f) + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go new file mode 100644 index 00000000000..a05dfe27b65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go @@ -0,0 +1,375 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Handler for Docker containers. +package docker + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math" + "os" + "path" + "strings" + "time" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + cgroup_fs "github.com/docker/libcontainer/cgroups/fs" + "github.com/fsouza/go-dockerclient" + "github.com/google/cadvisor/container" + containerLibcontainer "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" +) + +// Relative path from Docker root to the libcontainer per-container state. +const pathToLibcontainerState = "execdriver/native" + +// Path to aufs dir where all the files exist. +// aufs/layers is ignored here since it does not hold a lot of data. +// aufs/mnt contains the mount points used to compose the rootfs. Hence it is also ignored. +var pathToAufsDir = "aufs/diff" + +type dockerContainerHandler struct { + client *docker.Client + name string + id string + aliases []string + machineInfoFactory info.MachineInfoFactory + + // Path to the libcontainer config file. + libcontainerConfigPath string + + // Path to the libcontainer state file. + libcontainerStatePath string + + // TODO(vmarmol): Remove when we depend on a newer Docker. + // Path to the libcontainer pid file. + libcontainerPidPath string + + // Absolute path to the cgroup hierarchies of this container. + // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test") + cgroupPaths map[string]string + + cgroup cgroups.Cgroup + usesAufsDriver bool + fsInfo fs.FsInfo + storageDirs []string + + // Time at which this container was created. + creationTime time.Time +} + +func DockerStateDir() string { + return path.Join(*dockerRootDir, pathToLibcontainerState) +} + +func newDockerContainerHandler( + client *docker.Client, + name string, + machineInfoFactory info.MachineInfoFactory, + dockerRootDir string, + usesAufsDriver bool, + cgroupSubsystems *containerLibcontainer.CgroupSubsystems, +) (container.ContainerHandler, error) { + // TODO(vmarmol): Get from factory. + fsInfo, err := fs.NewFsInfo() + if err != nil { + return nil, err + } + + // Create the cgroup paths. + cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) + for key, val := range cgroupSubsystems.MountPoints { + cgroupPaths[key] = path.Join(val, name) + } + + id := ContainerNameToDockerId(name) + stateDir := DockerStateDir() + handler := &dockerContainerHandler{ + id: id, + client: client, + name: name, + machineInfoFactory: machineInfoFactory, + libcontainerConfigPath: path.Join(stateDir, id, "container.json"), + libcontainerStatePath: path.Join(stateDir, id, "state.json"), + libcontainerPidPath: path.Join(stateDir, id, "pid"), + cgroupPaths: cgroupPaths, + cgroup: cgroups.Cgroup{ + Parent: "/", + Name: name, + }, + usesAufsDriver: usesAufsDriver, + fsInfo: fsInfo, + } + handler.storageDirs = append(handler.storageDirs, path.Join(dockerRootDir, pathToAufsDir, id)) + + // We assume that if Inspect fails then the container is not known to docker. 
+ ctnr, err := client.InspectContainer(id) + if err != nil { + return nil, fmt.Errorf("failed to inspect container %q: %v", id, err) + } + handler.creationTime = ctnr.Created + + // Add the name and bare ID as aliases of the container. + handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/")) + handler.aliases = append(handler.aliases, id) + + return handler, nil +} + +func (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) { + return info.ContainerReference{ + Name: self.name, + Aliases: self.aliases, + Namespace: DockerNamespace, + }, nil +} + +// TODO(vmarmol): Switch to getting this from libcontainer once we have a solid API. +func (self *dockerContainerHandler) readLibcontainerConfig() (*libcontainer.Config, error) { + out, err := ioutil.ReadFile(self.libcontainerConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to read libcontainer config from %q: %v", self.libcontainerConfigPath, err) + } + var config libcontainer.Config + err = json.Unmarshal(out, &config) + if err != nil { + // TODO(vmarmol): Remove this once it becomes the standard. + // Try to parse the old config. The main difference is that namespaces used to be a map, now it is a slice of structs. + // The JSON marshaler will use the non-nested field before the nested one. + type oldLibcontainerConfig struct { + libcontainer.Config + OldNamespaces map[string]bool `json:"namespaces,omitempty"` + } + var oldConfig oldLibcontainerConfig + err2 := json.Unmarshal(out, &oldConfig) + if err2 != nil { + // Use original error. + return nil, fmt.Errorf("failed to parse libcontainer config at %q: %v", self.libcontainerConfigPath, err) + } + + // Translate the old config into the new config. + config = oldConfig.Config + for ns := range oldConfig.OldNamespaces { + config.Namespaces = append(config.Namespaces, libcontainer.Namespace{ + Type: libcontainer.NamespaceType(ns), + }) + } + } + + // Replace cgroup parent and name with our own since we may be running in a different context. + config.Cgroups.Name = self.cgroup.Name + config.Cgroups.Parent = self.cgroup.Parent + + return &config, nil +} + +func (self *dockerContainerHandler) readLibcontainerState() (state *libcontainer.State, err error) { + // TODO(vmarmol): Remove this once we can depend on a newer Docker. + // Libcontainer changed how its state was stored, try the old way of a "pid" file + if !utils.FileExists(self.libcontainerStatePath) { + if utils.FileExists(self.libcontainerPidPath) { + // We don't need the old state, return an empty state and we'll gracefully degrade. + return &libcontainer.State{}, nil + } + } + f, err := os.Open(self.libcontainerStatePath) + if err != nil { + return nil, fmt.Errorf("failed to open %s - %s\n", self.libcontainerStatePath, err) + } + defer f.Close() + d := json.NewDecoder(f) + retState := new(libcontainer.State) + err = d.Decode(retState) + if err != nil { + return nil, fmt.Errorf("failed to parse libcontainer state at %q: %v", self.libcontainerStatePath, err) + } + state = retState + + // Create cgroup paths if they don't exist. This is since older Docker clients don't write it. 
+ if len(state.CgroupPaths) == 0 { + state.CgroupPaths = self.cgroupPaths + } + + return +} + +func libcontainerConfigToContainerSpec(config *libcontainer.Config, mi *info.MachineInfo) info.ContainerSpec { + var spec info.ContainerSpec + spec.HasMemory = true + spec.Memory.Limit = math.MaxUint64 + spec.Memory.SwapLimit = math.MaxUint64 + if config.Cgroups.Memory > 0 { + spec.Memory.Limit = uint64(config.Cgroups.Memory) + } + if config.Cgroups.MemorySwap > 0 { + spec.Memory.SwapLimit = uint64(config.Cgroups.MemorySwap) + } + + // Get CPU info + spec.HasCpu = true + spec.Cpu.Limit = 1024 + if config.Cgroups.CpuShares != 0 { + spec.Cpu.Limit = uint64(config.Cgroups.CpuShares) + } + spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores) + + spec.HasNetwork = true + spec.HasDiskIo = true + + return spec +} + +func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return info.ContainerSpec{}, err + } + libcontainerConfig, err := self.readLibcontainerConfig() + if err != nil { + return info.ContainerSpec{}, err + } + + spec := libcontainerConfigToContainerSpec(libcontainerConfig, mi) + spec.CreationTime = self.creationTime + if self.usesAufsDriver { + spec.HasFilesystem = true + } + + return spec, err +} + +func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error { + // No support for non-aufs storage drivers. + if !self.usesAufsDriver { + return nil + } + + // As of now we assume that all the storage dirs are on the same device. + // The first storage dir will be that of the image layers. + deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0]) + if err != nil { + return err + } + + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return err + } + var limit uint64 = 0 + // Docker does not impose any filesystem limits for containers. So use capacity as limit. + for _, fs := range mi.Filesystems { + if fs.Device == deviceInfo.Device { + limit = fs.Capacity + break + } + } + + fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit} + + var usage uint64 = 0 + for _, dir := range self.storageDirs { + // TODO(Vishh): Add support for external mounts. 
+ dirUsage, err := self.fsInfo.GetDirUsage(dir) + if err != nil { + return err + } + usage += dirUsage + } + fsStat.Usage = usage + stats.Filesystem = append(stats.Filesystem, fsStat) + + return nil +} + +func (self *dockerContainerHandler) GetStats() (stats *info.ContainerStats, err error) { + state, err := self.readLibcontainerState() + if err != nil { + return nil, err + } + + stats, err = containerLibcontainer.GetStats(self.cgroupPaths, state) + if err != nil { + return stats, err + } + err = self.getFsStats(stats) + if err != nil { + return stats, err + } + + return stats, nil +} + +func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) { + if self.name != "/docker" { + return []info.ContainerReference{}, nil + } + opt := docker.ListContainersOptions{ + All: true, + } + containers, err := self.client.ListContainers(opt) + if err != nil { + return nil, err + } + + ret := make([]info.ContainerReference, 0, len(containers)+1) + for _, c := range containers { + if !strings.HasPrefix(c.Status, "Up ") { + continue + } + + ref := info.ContainerReference{ + Name: FullContainerName(c.ID), + Aliases: append(c.Names, c.ID), + Namespace: DockerNamespace, + } + ret = append(ret, ref) + } + + return ret, nil +} + +func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) { + path, ok := self.cgroupPaths[resource] + if !ok { + return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name) + } + return path, nil +} + +func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]int, error) { + return nil, nil +} + +func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) { + return cgroup_fs.GetPids(&self.cgroup) +} + +func (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error { + return fmt.Errorf("watch is unimplemented in the Docker container driver") +} + +func (self *dockerContainerHandler) StopWatchingSubcontainers() error { + // No-op for Docker driver. + return nil +} + +func (self *dockerContainerHandler) Exists() bool { + // We consider the container existing if both libcontainer config and state files exist. + return utils.FileExists(self.libcontainerConfigPath) && utils.FileExists(self.libcontainerStatePath) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go new file mode 100644 index 00000000000..520da9e9bf1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go @@ -0,0 +1,79 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package container + +import ( + "fmt" + "sync" + + "github.com/golang/glog" +) + +type ContainerHandlerFactory interface { + // Create a new ContainerHandler using this factory. CanHandle() must have returned true. 
+ NewContainerHandler(name string) (ContainerHandler, error) + + // Returns whether this factory can handle the specified container. + CanHandle(name string) (bool, error) + + // Name of the factory. + String() string +} + +// TODO(vmarmol): Consider not making this global. +// Global list of factories. +var ( + factories []ContainerHandlerFactory + factoriesLock sync.RWMutex +) + +// Register a ContainerHandlerFactory. These should be registered from least general to most general +// as they will be asked in order whether they can handle a particular container. +func RegisterContainerHandlerFactory(factory ContainerHandlerFactory) { + factoriesLock.Lock() + defer factoriesLock.Unlock() + + factories = append(factories, factory) +} + +// Create a new ContainerHandler for the specified container. +func NewContainerHandler(name string) (ContainerHandler, error) { + factoriesLock.RLock() + defer factoriesLock.RUnlock() + + // Create the ContainerHandler with the first factory that supports it. + for _, factory := range factories { + canHandle, err := factory.CanHandle(name) + if err != nil { + glog.V(1).Infof("Error trying to work out if we can hande %s: %v", name, err) + } + if canHandle { + glog.V(1).Infof("Using factory %q for container %q", factory, name) + return factory.NewContainerHandler(name) + } else { + glog.V(1).Infof("Factory %q was unable to handle container %q", factory, name) + } + } + + return nil, fmt.Errorf("no known factory can handle creation of container") +} + +// Clear the known factories. +func ClearContainerHandlerFactories() { + factoriesLock.Lock() + defer factoriesLock.Unlock() + + factories = make([]ContainerHandlerFactory, 0, 4) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go new file mode 100644 index 00000000000..45b7945d58c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go @@ -0,0 +1,122 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package container + +import ( + "testing" + + "github.com/stretchr/testify/mock" +) + +type mockContainerHandlerFactory struct { + mock.Mock + Name string + CanHandleValue bool +} + +func (self *mockContainerHandlerFactory) String() string { + return self.Name +} + +func (self *mockContainerHandlerFactory) CanHandle(name string) (bool, error) { + return self.CanHandleValue, nil +} + +func (self *mockContainerHandlerFactory) NewContainerHandler(name string) (ContainerHandler, error) { + args := self.Called(name) + return args.Get(0).(ContainerHandler), args.Error(1) +} + +const testContainerName = "/test" + +var mockFactory FactoryForMockContainerHandler + +func TestNewContainerHandler_FirstMatches(t *testing.T) { + ClearContainerHandlerFactories() + + // Register one allways yes factory. 
+ allwaysYes := &mockContainerHandlerFactory{ + Name: "yes", + CanHandleValue: true, + } + RegisterContainerHandlerFactory(allwaysYes) + + // The yes factory should be asked to create the ContainerHandler. + mockContainer, err := mockFactory.NewContainerHandler(testContainerName) + if err != nil { + t.Error(err) + } + allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil) + + cont, err := NewContainerHandler(testContainerName) + if err != nil { + t.Error(err) + } + if cont == nil { + t.Error("Expected container to not be nil") + } +} + +func TestNewContainerHandler_SecondMatches(t *testing.T) { + ClearContainerHandlerFactories() + + // Register one allways no and one always yes factory. + allwaysNo := &mockContainerHandlerFactory{ + Name: "no", + CanHandleValue: false, + } + RegisterContainerHandlerFactory(allwaysNo) + allwaysYes := &mockContainerHandlerFactory{ + Name: "yes", + CanHandleValue: true, + } + RegisterContainerHandlerFactory(allwaysYes) + + // The yes factory should be asked to create the ContainerHandler. + mockContainer, err := mockFactory.NewContainerHandler(testContainerName) + if err != nil { + t.Error(err) + } + allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil) + + cont, err := NewContainerHandler(testContainerName) + if err != nil { + t.Error(err) + } + if cont == nil { + t.Error("Expected container to not be nil") + } +} + +func TestNewContainerHandler_NoneMatch(t *testing.T) { + ClearContainerHandlerFactories() + + // Register two allways no factories. + allwaysNo1 := &mockContainerHandlerFactory{ + Name: "no", + CanHandleValue: false, + } + RegisterContainerHandlerFactory(allwaysNo1) + allwaysNo2 := &mockContainerHandlerFactory{ + Name: "no", + CanHandleValue: false, + } + RegisterContainerHandlerFactory(allwaysNo2) + + _, err := NewContainerHandler(testContainerName) + if err == nil { + t.Error("Expected NewContainerHandler to fail") + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go new file mode 100644 index 00000000000..e33030c7132 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -0,0 +1,185 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcontainer + +import ( + "fmt" + "time" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + cgroupfs "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/network" + info "github.com/google/cadvisor/info/v1" +) + +type CgroupSubsystems struct { + // Cgroup subsystem mounts. + // e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"] + Mounts []cgroups.Mount + + // Cgroup subsystem to their mount location. + // e.g.: "cpu" -> "/sys/fs/cgroup/cpu" + MountPoints map[string]string +} + +// Get information about the cgroup subsystems. 
+func GetCgroupSubsystems() (CgroupSubsystems, error) { + // Get all cgroup mounts. + allCgroups, err := cgroups.GetCgroupMounts() + if err != nil { + return CgroupSubsystems{}, err + } + if len(allCgroups) == 0 { + return CgroupSubsystems{}, fmt.Errorf("failed to find cgroup mounts") + } + + // Trim the mounts to only the subsystems we care about. + supportedCgroups := make([]cgroups.Mount, 0, len(allCgroups)) + mountPoints := make(map[string]string, len(allCgroups)) + for _, mount := range allCgroups { + for _, subsystem := range mount.Subsystems { + if _, ok := supportedSubsystems[subsystem]; ok { + supportedCgroups = append(supportedCgroups, mount) + mountPoints[subsystem] = mount.Mountpoint + } + } + } + + return CgroupSubsystems{ + Mounts: supportedCgroups, + MountPoints: mountPoints, + }, nil +} + +// Cgroup subsystems we support listing (should be the minimal set we need stats from). +var supportedSubsystems map[string]struct{} = map[string]struct{}{ + "cpu": {}, + "cpuacct": {}, + "memory": {}, + "cpuset": {}, + "blkio": {}, +} + +// Get stats of the specified container +func GetStats(cgroupPaths map[string]string, state *libcontainer.State) (*info.ContainerStats, error) { + // TODO(vmarmol): Use libcontainer's Stats() in the new API when that is ready. + stats := &libcontainer.ContainerStats{} + + var err error + stats.CgroupStats, err = cgroupfs.GetStats(cgroupPaths) + if err != nil { + return &info.ContainerStats{}, err + } + + stats.NetworkStats, err = network.GetStats(&state.NetworkState) + if err != nil { + return &info.ContainerStats{}, err + } + + return toContainerStats(stats), nil +} + +func DiskStatsCopy(blkio_stats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) { + if len(blkio_stats) == 0 { + return + } + type DiskKey struct { + Major uint64 + Minor uint64 + } + disk_stat := make(map[DiskKey]*info.PerDiskStats) + for i := range blkio_stats { + major := blkio_stats[i].Major + minor := blkio_stats[i].Minor + disk_key := DiskKey{ + Major: major, + Minor: minor, + } + diskp, ok := disk_stat[disk_key] + if !ok { + disk := info.PerDiskStats{ + Major: major, + Minor: minor, + } + disk.Stats = make(map[string]uint64) + diskp = &disk + disk_stat[disk_key] = diskp + } + op := blkio_stats[i].Op + if op == "" { + op = "Count" + } + diskp.Stats[op] = blkio_stats[i].Value + } + i := 0 + stat = make([]info.PerDiskStats, len(disk_stat)) + for _, disk := range disk_stat { + stat[i] = *disk + i++ + } + return +} + +// Convert libcontainer stats to info.ContainerStats. 
+func toContainerStats(libcontainerStats *libcontainer.ContainerStats) *info.ContainerStats { + s := libcontainerStats.CgroupStats + ret := new(info.ContainerStats) + ret.Timestamp = time.Now() + + if s != nil { + ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode + ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode + n := len(s.CpuStats.CpuUsage.PercpuUsage) + ret.Cpu.Usage.PerCpu = make([]uint64, n) + + ret.Cpu.Usage.Total = 0 + for i := 0; i < n; i++ { + ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i] + ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i] + } + + ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive) + ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive) + ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive) + ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive) + ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive) + ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive) + ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive) + ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive) + + ret.Memory.Usage = s.MemoryStats.Usage + if v, ok := s.MemoryStats.Stats["pgfault"]; ok { + ret.Memory.ContainerData.Pgfault = v + ret.Memory.HierarchicalData.Pgfault = v + } + if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok { + ret.Memory.ContainerData.Pgmajfault = v + ret.Memory.HierarchicalData.Pgmajfault = v + } + if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok { + ret.Memory.WorkingSet = ret.Memory.Usage - v + if v, ok := s.MemoryStats.Stats["total_active_file"]; ok { + ret.Memory.WorkingSet -= v + } + } + } + // TODO(vishh): Perform a deep copy or alias libcontainer network stats. + if libcontainerStats.NetworkStats != nil { + ret.Network = *(*info.NetworkStats)(libcontainerStats.NetworkStats) + } + + return ret +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go new file mode 100644 index 00000000000..1d5498854f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go @@ -0,0 +1,117 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package container + +import ( + info "github.com/google/cadvisor/info/v1" + "github.com/stretchr/testify/mock" +) + +// This struct mocks a container handler. +type MockContainerHandler struct { + mock.Mock + Name string + Aliases []string +} + +func NewMockContainerHandler(containerName string) *MockContainerHandler { + return &MockContainerHandler{ + Name: containerName, + } +} + +// If self.Name is not empty, then ContainerReference() will return self.Name and self.Aliases. +// Otherwise, it will use the value provided by .On().Return(). 
+func (self *MockContainerHandler) ContainerReference() (info.ContainerReference, error) { + if len(self.Name) > 0 { + var aliases []string + if len(self.Aliases) > 0 { + aliases = make([]string, len(self.Aliases)) + copy(aliases, self.Aliases) + } + return info.ContainerReference{ + Name: self.Name, + Aliases: aliases, + }, nil + } + args := self.Called() + return args.Get(0).(info.ContainerReference), args.Error(1) +} + +func (self *MockContainerHandler) GetSpec() (info.ContainerSpec, error) { + args := self.Called() + return args.Get(0).(info.ContainerSpec), args.Error(1) +} + +func (self *MockContainerHandler) GetStats() (*info.ContainerStats, error) { + args := self.Called() + return args.Get(0).(*info.ContainerStats), args.Error(1) +} + +func (self *MockContainerHandler) ListContainers(listType ListType) ([]info.ContainerReference, error) { + args := self.Called(listType) + return args.Get(0).([]info.ContainerReference), args.Error(1) +} + +func (self *MockContainerHandler) ListThreads(listType ListType) ([]int, error) { + args := self.Called(listType) + return args.Get(0).([]int), args.Error(1) +} + +func (self *MockContainerHandler) ListProcesses(listType ListType) ([]int, error) { + args := self.Called(listType) + return args.Get(0).([]int), args.Error(1) +} + +func (self *MockContainerHandler) WatchSubcontainers(events chan SubcontainerEvent) error { + args := self.Called(events) + return args.Error(0) +} + +func (self *MockContainerHandler) StopWatchingSubcontainers() error { + args := self.Called() + return args.Error(0) +} + +func (self *MockContainerHandler) Exists() bool { + args := self.Called() + return args.Get(0).(bool) +} + +func (self *MockContainerHandler) GetCgroupPath(path string) (string, error) { + args := self.Called(path) + return args.Get(0).(string), args.Error(1) +} + +type FactoryForMockContainerHandler struct { + Name string + PrepareContainerHandlerFunc func(name string, handler *MockContainerHandler) +} + +func (self *FactoryForMockContainerHandler) String() string { + return self.Name +} + +func (self *FactoryForMockContainerHandler) NewContainerHandler(name string) (ContainerHandler, error) { + handler := &MockContainerHandler{} + if self.PrepareContainerHandlerFunc != nil { + self.PrepareContainerHandlerFunc(name, handler) + } + return handler, nil +} + +func (self *FactoryForMockContainerHandler) CanHandle(name string) bool { + return true +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints.go new file mode 100644 index 00000000000..e1d90233e83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Unmarshal's a Containers description json file. 
The json file contains +// an array of ContainerHint structs, each with a container's id and networkInterface +// This allows collecting stats about network interfaces configured outside docker +// and lxc +package raw + +import ( + "encoding/json" + "flag" + "io/ioutil" + "os" +) + +var argContainerHints = flag.String("container_hints", "/etc/cadvisor/container_hints.json", "location of the container hints file") + +type containerHints struct { + AllHosts []containerHint `json:"all_hosts,omitempty"` +} + +type containerHint struct { + FullName string `json:"full_path,omitempty"` + NetworkInterface *networkInterface `json:"network_interface,omitempty"` + Mounts []mount `json:"mounts,omitempty"` +} + +type mount struct { + HostDir string `json:"host_dir,omitempty"` + ContainerDir string `json:"container_dir,omitempty"` +} + +type networkInterface struct { + VethHost string `json:"veth_host,omitempty"` + VethChild string `json:"veth_child,omitempty"` +} + +func getContainerHintsFromFile(containerHintsFile string) (containerHints, error) { + dat, err := ioutil.ReadFile(containerHintsFile) + if os.IsNotExist(err) { + return containerHints{}, nil + } + var cHints containerHints + if err == nil { + err = json.Unmarshal(dat, &cHints) + } + + return cHints, err +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints_test.go new file mode 100644 index 00000000000..83d32e74086 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints_test.go @@ -0,0 +1,57 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raw + +import ( + "testing" +) + +func TestGetContainerHintsFromFile(t *testing.T) { + cHints, err := getContainerHintsFromFile("test_resources/container_hints.json") + + if err != nil { + t.Fatalf("Error in unmarshalling: %s", err) + } + + if cHints.AllHosts[0].NetworkInterface.VethHost != "veth24031eth1" && + cHints.AllHosts[0].NetworkInterface.VethChild != "eth1" { + t.Errorf("Cannot find network interface in %s", cHints) + } + + correctMountDirs := [...]string{ + "/var/run/nm-sdc1", + "/var/run/nm-sdb3", + "/var/run/nm-sda3", + "/var/run/netns/root", + "/var/run/openvswitch/db.sock", + } + + if len(cHints.AllHosts[0].Mounts) == 0 { + t.Errorf("Cannot find any mounts") + } + + for i, mountDir := range cHints.AllHosts[0].Mounts { + if correctMountDirs[i] != mountDir.HostDir { + t.Errorf("Cannot find mount %s in %s", mountDir.HostDir, cHints) + } + } +} + +func TestFileNotExist(t *testing.T) { + _, err := getContainerHintsFromFile("/file_does_not_exist.json") + if err != nil { + t.Fatalf("getContainerHintsFromFile must not error for blank file: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go new file mode 100644 index 00000000000..4cd918f37c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go @@ -0,0 +1,63 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raw + +import ( + "fmt" + + "github.com/golang/glog" + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/libcontainer" + info "github.com/google/cadvisor/info/v1" +) + +type rawFactory struct { + // Factory for machine information. + machineInfoFactory info.MachineInfoFactory + + // Information about the cgroup subsystems. + cgroupSubsystems *libcontainer.CgroupSubsystems +} + +func (self *rawFactory) String() string { + return "raw" +} + +func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) { + return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory) +} + +// The raw factory can handle any container. 
+func (self *rawFactory) CanHandle(name string) (bool, error) { + return true, nil +} + +func Register(machineInfoFactory info.MachineInfoFactory) error { + cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() + if err != nil { + return fmt.Errorf("failed to get cgroup subsystems: %v", err) + } + if len(cgroupSubsystems.Mounts) == 0 { + return fmt.Errorf("failed to find supported cgroup mounts for the raw factory") + } + + glog.Infof("Registering Raw factory") + factory := &rawFactory{ + machineInfoFactory: machineInfoFactory, + cgroupSubsystems: &cgroupSubsystems, + } + container.RegisterContainerHandlerFactory(factory) + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go new file mode 100644 index 00000000000..d5781e76a01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go @@ -0,0 +1,570 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Handler for "raw" containers. +package raw + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + "time" + + "code.google.com/p/go.exp/inotify" + dockerlibcontainer "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + cgroup_fs "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/network" + "github.com/golang/glog" + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" + "github.com/google/cadvisor/utils/sysinfo" +) + +type rawContainerHandler struct { + // Name of the container for this handler. + name string + cgroup *cgroups.Cgroup + cgroupSubsystems *libcontainer.CgroupSubsystems + machineInfoFactory info.MachineInfoFactory + + // Inotify event watcher. + watcher *inotify.Watcher + + // Signal for watcher thread to stop. + stopWatcher chan error + + // Containers being watched for new subcontainers. + watches map[string]struct{} + + // Cgroup paths being watchd for new subcontainers + cgroupWatches map[string]struct{} + + // Absolute path to the cgroup hierarchies of this container. + // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test") + cgroupPaths map[string]string + + // Equivalent libcontainer state for this container. + libcontainerState dockerlibcontainer.State + + // Whether this container has network isolation enabled. + hasNetwork bool + + fsInfo fs.FsInfo + externalMounts []mount +} + +func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory) (container.ContainerHandler, error) { + // Create the cgroup paths. 
+ cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) + for key, val := range cgroupSubsystems.MountPoints { + cgroupPaths[key] = path.Join(val, name) + } + + // TODO(vmarmol): Get from factory. + fsInfo, err := fs.NewFsInfo() + if err != nil { + return nil, err + } + cHints, err := getContainerHintsFromFile(*argContainerHints) + if err != nil { + return nil, err + } + + // Generate the equivalent libcontainer state for this container. + libcontainerState := dockerlibcontainer.State{ + CgroupPaths: cgroupPaths, + } + + hasNetwork := false + var externalMounts []mount + for _, container := range cHints.AllHosts { + if name == container.FullName { + libcontainerState.NetworkState = network.NetworkState{ + VethHost: container.NetworkInterface.VethHost, + VethChild: container.NetworkInterface.VethChild, + } + hasNetwork = true + externalMounts = container.Mounts + break + } + } + + return &rawContainerHandler{ + name: name, + cgroup: &cgroups.Cgroup{ + Parent: "/", + Name: name, + }, + cgroupSubsystems: cgroupSubsystems, + machineInfoFactory: machineInfoFactory, + stopWatcher: make(chan error), + watches: make(map[string]struct{}), + cgroupWatches: make(map[string]struct{}), + cgroupPaths: cgroupPaths, + libcontainerState: libcontainerState, + fsInfo: fsInfo, + hasNetwork: hasNetwork, + externalMounts: externalMounts, + }, nil +} + +func (self *rawContainerHandler) ContainerReference() (info.ContainerReference, error) { + // We only know the container by its one name. + return info.ContainerReference{ + Name: self.name, + }, nil +} + +func readString(dirpath string, file string) string { + cgroupFile := path.Join(dirpath, file) + + // Ignore non-existent files + if !utils.FileExists(cgroupFile) { + return "" + } + + // Read + out, err := ioutil.ReadFile(cgroupFile) + if err != nil { + glog.Errorf("raw driver: Failed to read %q: %s", cgroupFile, err) + return "" + } + return strings.TrimSpace(string(out)) +} + +func readInt64(dirpath string, file string) uint64 { + out := readString(dirpath, file) + if out == "" { + return 0 + } + + val, err := strconv.ParseUint(out, 10, 64) + if err != nil { + glog.Errorf("raw driver: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err) + return 0 + } + + return val +} + +func (self *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error) { + nd := []info.NetInfo{} + if self.name == "/" { + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return nd, err + } + return mi.NetworkDevices, nil + } + return nd, nil +} + +func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) { + var spec info.ContainerSpec + + // The raw driver assumes unified hierarchy containers. + + // Get the lowest creation time from all hierarchies as the container creation time. + now := time.Now() + lowestTime := now + for _, cgroupPath := range self.cgroupPaths { + // The modified time of the cgroup directory is when the container was created. + fi, err := os.Stat(cgroupPath) + if err == nil && fi.ModTime().Before(lowestTime) { + lowestTime = fi.ModTime() + } + } + if lowestTime != now { + spec.CreationTime = lowestTime + } + + // Get machine info. + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return spec, err + } + + // CPU. + cpuRoot, ok := self.cgroupPaths["cpu"] + if ok { + if utils.FileExists(cpuRoot) { + spec.HasCpu = true + spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares") + } + } + + // Cpu Mask. + // This will fail for non-unified hierarchies. 
We'll return the whole machine mask in that case. + cpusetRoot, ok := self.cgroupPaths["cpuset"] + if ok { + if utils.FileExists(cpusetRoot) { + spec.HasCpu = true + mask := readString(cpusetRoot, "cpuset.cpus") + spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores) + } + } + + // Memory. + memoryRoot, ok := self.cgroupPaths["memory"] + if ok { + if utils.FileExists(memoryRoot) { + spec.HasMemory = true + spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes") + spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes") + } + } + + // Fs. + if self.name == "/" || self.externalMounts != nil { + spec.HasFilesystem = true + } + + //Network + spec.HasNetwork = self.hasNetwork + + // DiskIo. + if blkioRoot, ok := self.cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) { + spec.HasDiskIo = true + } + + // Check physical network devices for root container. + nd, err := self.GetRootNetworkDevices() + if err != nil { + return spec, err + } + if len(nd) != 0 { + spec.HasNetwork = true + } + return spec, nil +} + +func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error { + // Get Filesystem information only for the root cgroup. + if self.name == "/" { + filesystems, err := self.fsInfo.GetGlobalFsInfo() + if err != nil { + return err + } + for _, fs := range filesystems { + stats.Filesystem = append(stats.Filesystem, + info.FsStats{ + Device: fs.Device, + Limit: fs.Capacity, + Usage: fs.Capacity - fs.Free, + ReadsCompleted: fs.DiskStats.ReadsCompleted, + ReadsMerged: fs.DiskStats.ReadsMerged, + SectorsRead: fs.DiskStats.SectorsRead, + ReadTime: fs.DiskStats.ReadTime, + WritesCompleted: fs.DiskStats.WritesCompleted, + WritesMerged: fs.DiskStats.WritesMerged, + SectorsWritten: fs.DiskStats.SectorsWritten, + WriteTime: fs.DiskStats.WriteTime, + IoInProgress: fs.DiskStats.IoInProgress, + IoTime: fs.DiskStats.IoTime, + WeightedIoTime: fs.DiskStats.WeightedIoTime, + }) + } + } else if len(self.externalMounts) > 0 { + var mountSet map[string]struct{} + mountSet = make(map[string]struct{}) + for _, mount := range self.externalMounts { + mountSet[mount.HostDir] = struct{}{} + } + filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet) + if err != nil { + return err + } + for _, fs := range filesystems { + stats.Filesystem = append(stats.Filesystem, + info.FsStats{ + Device: fs.Device, + Limit: fs.Capacity, + Usage: fs.Capacity - fs.Free, + ReadsCompleted: fs.DiskStats.ReadsCompleted, + ReadsMerged: fs.DiskStats.ReadsMerged, + SectorsRead: fs.DiskStats.SectorsRead, + ReadTime: fs.DiskStats.ReadTime, + WritesCompleted: fs.DiskStats.WritesCompleted, + WritesMerged: fs.DiskStats.WritesMerged, + SectorsWritten: fs.DiskStats.SectorsWritten, + WriteTime: fs.DiskStats.WriteTime, + IoInProgress: fs.DiskStats.IoInProgress, + IoTime: fs.DiskStats.IoTime, + WeightedIoTime: fs.DiskStats.WeightedIoTime, + }) + } + } + return nil +} + +func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) { + stats, err := libcontainer.GetStats(self.cgroupPaths, &self.libcontainerState) + if err != nil { + return stats, err + } + + err = self.getFsStats(stats) + if err != nil { + return stats, err + } + + // Fill in network stats for root. + nd, err := self.GetRootNetworkDevices() + if err != nil { + return stats, err + } + if len(nd) != 0 { + // ContainerStats only reports stat for one network device. + // TODO(rjnagal): Handle multiple physical network devices. 
+ stats.Network, err = sysinfo.GetNetworkStats(nd[0].Name) + if err != nil { + return stats, err + } + } + return stats, nil +} + +func (self *rawContainerHandler) GetCgroupPath(resource string) (string, error) { + path, ok := self.cgroupPaths[resource] + if !ok { + return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name) + } + return path, nil +} + +// Lists all directories under "path" and outputs the results as children of "parent". +func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error { + // Ignore if this hierarchy does not exist. + if !utils.FileExists(dirpath) { + return nil + } + + entries, err := ioutil.ReadDir(dirpath) + if err != nil { + return err + } + for _, entry := range entries { + // We only grab directories. + if entry.IsDir() { + name := path.Join(parent, entry.Name()) + output[name] = struct{}{} + + // List subcontainers if asked to. + if recursive { + err := listDirectories(path.Join(dirpath, entry.Name()), name, true, output) + if err != nil { + return err + } + } + } + } + return nil +} + +func (self *rawContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) { + containers := make(map[string]struct{}) + for _, cgroupPath := range self.cgroupPaths { + err := listDirectories(cgroupPath, self.name, listType == container.ListRecursive, containers) + if err != nil { + return nil, err + } + } + + // Make into container references. + ret := make([]info.ContainerReference, 0, len(containers)) + for cont := range containers { + ret = append(ret, info.ContainerReference{ + Name: cont, + }) + } + + return ret, nil +} + +func (self *rawContainerHandler) ListThreads(listType container.ListType) ([]int, error) { + // TODO(vmarmol): Implement + return nil, nil +} + +func (self *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) { + return cgroup_fs.GetPids(self.cgroup) +} + +func (self *rawContainerHandler) watchDirectory(dir string, containerName string) error { + err := self.watcher.AddWatch(dir, inotify.IN_CREATE|inotify.IN_DELETE|inotify.IN_MOVE) + if err != nil { + return err + } + self.watches[containerName] = struct{}{} + self.cgroupWatches[dir] = struct{}{} + + // TODO(vmarmol): We should re-do this once we're done to ensure directories were not added in the meantime. + // Watch subdirectories as well. + entries, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + for _, entry := range entries { + if entry.IsDir() { + err = self.watchDirectory(path.Join(dir, entry.Name()), path.Join(containerName, entry.Name())) + if err != nil { + return err + } + } + } + return nil +} + +func (self *rawContainerHandler) processEvent(event *inotify.Event, events chan container.SubcontainerEvent) error { + // Convert the inotify event type to a container create or delete. + var eventType container.SubcontainerEventType + switch { + case (event.Mask & inotify.IN_CREATE) > 0: + eventType = container.SubcontainerAdd + case (event.Mask & inotify.IN_DELETE) > 0: + eventType = container.SubcontainerDelete + case (event.Mask & inotify.IN_MOVED_FROM) > 0: + eventType = container.SubcontainerDelete + case (event.Mask & inotify.IN_MOVED_TO) > 0: + eventType = container.SubcontainerAdd + default: + // Ignore other events. + return nil + } + + // Derive the container name from the path name. 
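	// (Editor's note, not part of the vendored source: for example, an event on
	// "/sys/fs/cgroup/cpu/docker/abc" with the "cpu" subsystem mounted at
	// "/sys/fs/cgroup/cpu" yields the container name "/docker/abc"; the "-1" in
	// the slice below keeps the leading slash contributed by the mount point's
	// trailing separator.)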
+ var containerName string + for _, mount := range self.cgroupSubsystems.Mounts { + mountLocation := path.Clean(mount.Mountpoint) + "/" + if strings.HasPrefix(event.Name, mountLocation) { + containerName = event.Name[len(mountLocation)-1:] + break + } + } + if containerName == "" { + return fmt.Errorf("unable to detect container from watch event on directory %q", event.Name) + } + + // Maintain the watch for the new or deleted container. + switch { + case eventType == container.SubcontainerAdd: + _, alreadyWatched := self.watches[containerName] + + // New container was created, watch it. + err := self.watchDirectory(event.Name, containerName) + if err != nil { + return err + } + + // Only report container creation once. + if alreadyWatched { + return nil + } + case eventType == container.SubcontainerDelete: + // Container was deleted, stop watching for it. Only delete the event if we registered it. + if _, ok := self.cgroupWatches[event.Name]; ok { + err := self.watcher.RemoveWatch(event.Name) + if err != nil { + return err + } + delete(self.cgroupWatches, event.Name) + } + + // Only report container deletion once. + if _, ok := self.watches[containerName]; !ok { + return nil + } + delete(self.watches, containerName) + default: + return fmt.Errorf("unknown event type %v", eventType) + } + + // Deliver the event. + events <- container.SubcontainerEvent{ + EventType: eventType, + Name: containerName, + } + + return nil +} + +func (self *rawContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error { + // Lazily initialize the watcher so we don't use it when not asked to. + if self.watcher == nil { + w, err := inotify.NewWatcher() + if err != nil { + return err + } + self.watcher = w + } + + // Watch this container (all its cgroups) and all subdirectories. + for _, cgroupPath := range self.cgroupPaths { + err := self.watchDirectory(cgroupPath, self.name) + if err != nil { + return err + } + } + + // Process the events received from the kernel. + go func() { + for { + select { + case event := <-self.watcher.Event: + err := self.processEvent(event, events) + if err != nil { + glog.Warningf("Error while processing event (%+v): %v", event, err) + } + case err := <-self.watcher.Error: + glog.Warningf("Error while watching %q:", self.name, err) + case <-self.stopWatcher: + err := self.watcher.Close() + if err == nil { + self.stopWatcher <- err + self.watcher = nil + return + } + } + } + }() + + return nil +} + +func (self *rawContainerHandler) StopWatchingSubcontainers() error { + if self.watcher == nil { + return fmt.Errorf("can't stop watch that has not started for container %q", self.name) + } + + // Rendezvous with the watcher thread. + self.stopWatcher <- nil + return <-self.stopWatcher +} + +func (self *rawContainerHandler) Exists() bool { + // If any cgroup exists, the container is still alive. 
+ for _, cgroupPath := range self.cgroupPaths { + if utils.FileExists(cgroupPath) { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/test_resources/container_hints.json b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/test_resources/container_hints.json new file mode 100644 index 00000000000..d8842e5539b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/test_resources/container_hints.json @@ -0,0 +1,40 @@ +{ + "name": "Container Hints", + "description": "Container hints file", + "all_hosts": [ + { + "network_interface": { + "veth_child": "eth1", + "veth_host": "veth24031eth1" + }, + "mounts": [ + { + "host_dir": "/var/run/nm-sdc1", + "container_dir": "/var/run/nm-sdc1", + "permission": "rw" + }, + { + "host_dir": "/var/run/nm-sdb3", + "container_dir": "/var/run/nm-sdb3", + "permission": "rw" + }, + { + "host_dir": "/var/run/nm-sda3", + "container_dir": "/var/run/nm-sda3", + "permission": "rw" + }, + { + "host_dir": "/var/run/netns/root", + "container_dir": "/var/run/netns/root", + "permission": "ro" + }, + { + "host_dir": "/var/run/openvswitch/db.sock", + "container_dir": "/var/run/openvswitch/db.sock", + "permission": "rw" + } + ], + "full_path": "18a4585950db428e4d5a65c216a5d708d241254709626f4cb300ee963fb4b144" + } + ] +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/events/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/events/handler.go new file mode 100644 index 00000000000..eb586e53071 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/events/handler.go @@ -0,0 +1,279 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package events + +import ( + "errors" + "sort" + "strings" + "sync" + "time" +) + +// EventManager is implemented by Events. It provides two ways to monitor +// events and one way to add events +type EventManager interface { + // Watch checks if events fed to it by the caller of AddEvent satisfy the + // request and if so sends the event back to the caller on outChannel + WatchEvents(outChannel chan *Event, request *Request) error + // GetEvents() returns a slice of all events detected that have passed + // the *Request object parameters to the caller + GetEvents(request *Request) (EventSlice, error) + // AddEvent allows the caller to add an event to an EventManager + // object + AddEvent(e *Event) error +} + +// Events holds a slice of *Event objects with a potential field +// that caps the number of events held. It is an implementation of the +// EventManager interface +type events struct { + // eventlist holds the complete set of events found over an + // EventManager events instantiation. + eventlist EventSlice + // the slice of watch pointers allows the EventManager access to channels + // linked to different calls of WatchEvents. 
When new events are found that + // satisfy the request of a given watch object in watchers, the event + // is sent over the channel to that caller of WatchEvents + watchers []*watch + // lock that blocks eventlist from being accessed until a writer releases it + eventsLock sync.RWMutex + // lock that blocks watchers from being accessed until a writer releases it + watcherLock sync.RWMutex +} + +// initialized by a call to WatchEvents(), a watch struct will then be added +// to the events slice of *watch objects. When AddEvent() finds an event that +// satisfies the request parameter of a watch object in events.watchers, +// it will send that event out over the watch object's channel. The caller that +// called WatchEvents will receive the event over the channel provided to +// WatchEvents +type watch struct { + // request specifies all the parameters that events sent through the + // channel must satisfy. Specified by the creator of the watch object + request *Request + // a channel created by the caller through which events satisfying the + // request are sent to the caller + channel chan *Event +} + +// typedef of a slice of Event pointers +type EventSlice []*Event + +// Event contains information general to events such as the time at which they +// occurred, their specific type, and the actual event. Event types are +// differentiated by the EventType field of Event. +type Event struct { + // the absolute container name for which the event occurred + ContainerName string + // the time at which the event occurred + Timestamp time.Time + // the type of event. EventType is an enumerated type + EventType EventType + // the original event object and all of its extraneous data, ex. an + // OomInstance + EventData EventDataInterface +} + +// Request holds a set of parameters by which Event objects may be screened. +// The caller may want events that occurred within a specific timeframe +// or of a certain type, which may be specified in the *Request object +// they pass to an EventManager function +type Request struct { + // events falling before StartTime do not satisfy the request. StartTime + // must be left blank in calls to WatchEvents + StartTime time.Time + // events falling after EndTime do not satisfy the request. EndTime + // must be left blank in calls to WatchEvents + EndTime time.Time + // EventType is a map that specifies the type(s) of events wanted + EventType map[EventType]bool + // allows the caller to put a limit on how many + // events they receive. If there are more events than MaxEventsReturned + // then the most chronologically recent events in the time period + // specified are returned. Must be >= 1 + MaxEventsReturned int + // the absolute container name for which the event occurred + ContainerName string + // if IncludeSubcontainers is false, only events occurring in the specific + // container, and not the subcontainers, will be returned + IncludeSubcontainers bool +} + +// EventType is an enumerated type which lists the categories under which +// events may fall. The Event field EventType is populated by this enum. +type EventType int + +const ( + TypeOom EventType = iota + TypeContainerCreation + TypeContainerDeletion +) + +// a general interface which populates the Event field EventData. 
The actual +// object, such as an OomInstance, is set as an Event's EventData +type EventDataInterface interface { +} + +// returns a pointer to an initialized Events object +func NewEventManager() *events { + return &events{ + eventlist: make(EventSlice, 0), + watchers: []*watch{}, + } +} + +// returns a pointer to an initialized Request object +func NewRequest() *Request { + return &Request{ + EventType: map[EventType]bool{}, + IncludeSubcontainers: false, + } +} + +// returns a pointer to an initialized watch object +func newWatch(request *Request, outChannel chan *Event) *watch { + return &watch{ + request: request, + channel: outChannel, + } +} + +// function necessary to implement the sort interface on the Events struct +func (e EventSlice) Len() int { + return len(e) +} + +// function necessary to implement the sort interface on the Events struct +func (e EventSlice) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +// function necessary to implement the sort interface on the Events struct +func (e EventSlice) Less(i, j int) bool { + return e[i].Timestamp.Before(e[j].Timestamp) +} + +// sorts and returns up to the last MaxEventsReturned chronological elements +func getMaxEventsReturned(request *Request, eSlice EventSlice) EventSlice { + sort.Sort(eSlice) + n := request.MaxEventsReturned + if n >= eSlice.Len() || n <= 0 { + return eSlice + } + return eSlice[eSlice.Len()-n:] +} + +// If the request wants all subcontainers, this returns if the request's +// container path is a prefix of the event container path. Otherwise, +// it checks that the container paths of the event and request are +// equivalent +func checkIfIsSubcontainer(request *Request, event *Event) bool { + if request.IncludeSubcontainers == true { + return strings.HasPrefix(event.ContainerName+"/", request.ContainerName+"/") + } + return event.ContainerName == request.ContainerName +} + +// determines if an event occurs within the time set in the request object and is the right type +func checkIfEventSatisfiesRequest(request *Request, event *Event) bool { + startTime := request.StartTime + endTime := request.EndTime + eventTime := event.Timestamp + if !startTime.IsZero() { + if startTime.After(eventTime) { + return false + } + } + if !endTime.IsZero() { + if endTime.Before(eventTime) { + return false + } + } + if request.EventType[event.EventType] != true { + return false + } + if request.ContainerName != "" { + return checkIfIsSubcontainer(request, event) + } + return true +} + +// method of Events object that screens Event objects found in the eventlist +// attribute and if they fit the parameters passed by the Request object, +// adds it to a slice of *Event objects that is returned. If both MaxEventsReturned +// and StartTime/EndTime are specified in the request object, then only +// up to the most recent MaxEventsReturned events in that time range are returned. +func (self *events) GetEvents(request *Request) (EventSlice, error) { + returnEventList := EventSlice{} + self.eventsLock.RLock() + defer self.eventsLock.RUnlock() + for _, e := range self.eventlist { + if checkIfEventSatisfiesRequest(request, e) { + returnEventList = append(returnEventList, e) + } + } + returnEventList = getMaxEventsReturned(request, returnEventList) + return returnEventList, nil +} + +// method of Events object that maintains an *Event channel passed by the user. +// When an event is added by AddEvents that satisfies the parameters in the passed +// Request object it is fed to the channel. 
The StartTime and EndTime of the watch +// request should be uninitialized because the purpose is to watch indefinitely +// for events that will happen in the future +func (self *events) WatchEvents(outChannel chan *Event, request *Request) error { + if !request.StartTime.IsZero() || !request.EndTime.IsZero() { + return errors.New( + "for a call to watch, request.StartTime and request.EndTime must be uninitialized") + } + newWatcher := newWatch(request, outChannel) + self.watcherLock.Lock() + defer self.watcherLock.Unlock() + self.watchers = append(self.watchers, newWatcher) + return nil +} + +// helper function to update the event manager's eventlist +func (self *events) updateEventList(e *Event) { + self.eventsLock.Lock() + defer self.eventsLock.Unlock() + self.eventlist = append(self.eventlist, e) +} + +func (self *events) findValidWatchers(e *Event) []*watch { + watchesToSend := make([]*watch, 0) + self.watcherLock.RLock() + defer self.watcherLock.RUnlock() + for _, watcher := range self.watchers { + watchRequest := watcher.request + if checkIfEventSatisfiesRequest(watchRequest, e) { + watchesToSend = append(watchesToSend, watcher) + } + } + return watchesToSend +} + +// method of Events object that adds the argument Event object to the +// eventlist. It also feeds the event to a set of watch channels +// held by the manager if it satisfies the request keys of the channels +func (self *events) AddEvent(e *Event) error { + self.updateEventList(e) + watchesToSend := self.findValidWatchers(e) + for _, watchObject := range watchesToSend { + watchObject.channel <- e + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/events/handler_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/events/handler_test.go new file mode 100644 index 00000000000..c1bf4caca58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/events/handler_test.go @@ -0,0 +1,190 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
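// Editor's note: the sketch below is an illustrative, standalone usage example
// of the event manager defined in events/handler.go above; it is not part of
// the vendored cadvisor patch. The package and function names are invented for
// the example, while the events API calls mirror the code above.
package eventsexample

import (
	"time"

	"github.com/google/cadvisor/events"
)

// watchAndRecordOom registers a watch for OOM events on the root container,
// records one such event, and returns it as delivered on the watch channel.
func watchAndRecordOom() (*events.Event, error) {
	var manager events.EventManager = events.NewEventManager()

	// Watch requests must leave StartTime and EndTime unset (see WatchEvents).
	watchRequest := events.NewRequest()
	watchRequest.EventType[events.TypeOom] = true
	watchRequest.ContainerName = "/"

	out := make(chan *events.Event, 1)
	if err := manager.WatchEvents(out, watchRequest); err != nil {
		return nil, err
	}

	// AddEvent stores the event and forwards it to every watcher whose request
	// it satisfies.
	err := manager.AddEvent(&events.Event{
		ContainerName: "/",
		Timestamp:     time.Now(),
		EventType:     events.TypeOom,
	})
	if err != nil {
		return nil, err
	}
	return <-out, nil
}

// recentOomEvents shows the GetEvents path: at most five OOM events from the
// last hour, for the root container and its subcontainers.
func recentOomEvents(manager events.EventManager) (events.EventSlice, error) {
	request := events.NewRequest()
	request.EventType[events.TypeOom] = true
	request.ContainerName = "/"
	request.IncludeSubcontainers = true
	request.StartTime = time.Now().Add(-time.Hour)
	request.EndTime = time.Now()
	request.MaxEventsReturned = 5
	return manager.GetEvents(request)
}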
+ +package events + +import ( + "testing" + "time" +) + +func createOldTime(t *testing.T) time.Time { + const longForm = "Jan 2, 2006 at 3:04pm (MST)" + linetime, err := time.Parse(longForm, "Feb 3, 2013 at 7:54pm (PST)") + if err != nil { + t.Fatalf("could not format time.Time object") + } else { + return linetime + } + return time.Now() +} + +// used to convert an OomInstance to an Event object +func makeEvent(inTime time.Time, containerName string) *Event { + return &Event{ + ContainerName: containerName, + Timestamp: inTime, + EventType: TypeOom, + } +} + +// returns EventManager and Request to use in tests +func initializeScenario(t *testing.T) (*events, *Request, *Event, *Event) { + fakeEvent := makeEvent(createOldTime(t), "/") + fakeEvent2 := makeEvent(time.Now(), "/") + + return NewEventManager(), NewRequest(), fakeEvent, fakeEvent2 +} + +func checkNumberOfEvents(t *testing.T, numEventsExpected int, numEventsReceived int) { + if numEventsReceived != numEventsExpected { + t.Fatalf("Expected to return %v events but received %v", + numEventsExpected, numEventsReceived) + } +} + +func ensureProperEventReturned(t *testing.T, expectedEvent *Event, eventObjectFound *Event) { + if eventObjectFound != expectedEvent { + t.Errorf("Expected to find test object %v but found a different object: %v", + expectedEvent, eventObjectFound) + } +} + +func TestCheckIfIsSubcontainer(t *testing.T) { + myRequest := NewRequest() + myRequest.ContainerName = "/root" + + sameContainerEvent := &Event{ + ContainerName: "/root", + } + subContainerEvent := &Event{ + ContainerName: "/root/subdir", + } + differentContainerEvent := &Event{ + ContainerName: "/root-completely-different-container", + } + + if !checkIfIsSubcontainer(myRequest, sameContainerEvent) { + t.Errorf("should have found %v and %v had the same container name", + myRequest, sameContainerEvent) + } + if checkIfIsSubcontainer(myRequest, subContainerEvent) { + t.Errorf("should have found %v and %v had different containers", + myRequest, subContainerEvent) + } + + myRequest.IncludeSubcontainers = true + + if !checkIfIsSubcontainer(myRequest, sameContainerEvent) { + t.Errorf("should have found %v and %v had the same container", + myRequest.ContainerName, sameContainerEvent.ContainerName) + } + if !checkIfIsSubcontainer(myRequest, subContainerEvent) { + t.Errorf("should have found %v was a subcontainer of %v", + subContainerEvent.ContainerName, myRequest.ContainerName) + } + if checkIfIsSubcontainer(myRequest, differentContainerEvent) { + t.Errorf("should have found %v and %v had different containers", + myRequest.ContainerName, differentContainerEvent.ContainerName) + } +} + +func TestWatchEventsDetectsNewEvents(t *testing.T) { + myEventHolder, myRequest, fakeEvent, fakeEvent2 := initializeScenario(t) + myRequest.EventType[TypeOom] = true + outChannel := make(chan *Event, 10) + myEventHolder.WatchEvents(outChannel, myRequest) + + myEventHolder.AddEvent(fakeEvent) + myEventHolder.AddEvent(fakeEvent2) + + startTime := time.Now() + go func() { + time.Sleep(5 * time.Second) + if time.Since(startTime) > (5 * time.Second) { + t.Errorf("Took too long to receive all the events") + close(outChannel) + } + }() + + eventsFound := 0 + go func() { + for event := range outChannel { + eventsFound += 1 + if eventsFound == 1 { + ensureProperEventReturned(t, fakeEvent, event) + } else if eventsFound == 2 { + ensureProperEventReturned(t, fakeEvent2, event) + close(outChannel) + break + } + } + }() +} + +func TestAddEventAddsEventsToEventManager(t *testing.T) { + 
myEventHolder, _, fakeEvent, _ := initializeScenario(t) + + myEventHolder.AddEvent(fakeEvent) + + checkNumberOfEvents(t, 1, myEventHolder.eventlist.Len()) + ensureProperEventReturned(t, fakeEvent, myEventHolder.eventlist[0]) +} + +func TestGetEventsForOneEvent(t *testing.T) { + myEventHolder, myRequest, fakeEvent, fakeEvent2 := initializeScenario(t) + myRequest.MaxEventsReturned = 1 + myRequest.EventType[TypeOom] = true + + myEventHolder.AddEvent(fakeEvent) + myEventHolder.AddEvent(fakeEvent2) + + receivedEvents, err := myEventHolder.GetEvents(myRequest) + if err != nil { + t.Errorf("Failed to GetEvents: %v", err) + } + checkNumberOfEvents(t, 1, receivedEvents.Len()) + ensureProperEventReturned(t, fakeEvent2, receivedEvents[0]) +} + +func TestGetEventsForTimePeriod(t *testing.T) { + myEventHolder, myRequest, fakeEvent, fakeEvent2 := initializeScenario(t) + myRequest.StartTime = createOldTime(t).Add(-1 * time.Second * 10) + myRequest.EndTime = createOldTime(t).Add(time.Second * 10) + myRequest.EventType[TypeOom] = true + + myEventHolder.AddEvent(fakeEvent) + myEventHolder.AddEvent(fakeEvent2) + + receivedEvents, err := myEventHolder.GetEvents(myRequest) + if err != nil { + t.Errorf("Failed to GetEvents: %v", err) + } + + checkNumberOfEvents(t, 1, receivedEvents.Len()) + ensureProperEventReturned(t, fakeEvent, receivedEvents[0]) +} + +func TestGetEventsForNoTypeRequested(t *testing.T) { + myEventHolder, myRequest, fakeEvent, fakeEvent2 := initializeScenario(t) + + myEventHolder.AddEvent(fakeEvent) + myEventHolder.AddEvent(fakeEvent2) + + receivedEvents, err := myEventHolder.GetEvents(myRequest) + if err != nil { + t.Errorf("Failed to GetEvents: %v", err) + } + checkNumberOfEvents(t, 0, receivedEvents.Len()) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go new file mode 100644 index 00000000000..2869f72ce48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +// Provides Filesystem Stats +package fs + +/* + extern int getBytesFree(const char *path, unsigned long long *bytes); + extern int getBytesTotal(const char *path, unsigned long long *bytes); +*/ +import "C" + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "regexp" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/mount" + "github.com/golang/glog" +) + +var partitionRegex = regexp.MustCompile("^(:?(:?s|xv)d[a-z]+\\d*|dm-\\d+)$") + +type partition struct { + mountpoint string + major uint + minor uint +} + +type RealFsInfo struct { + partitions map[string]partition +} + +func NewFsInfo() (FsInfo, error) { + mounts, err := mount.GetMounts() + if err != nil { + return nil, err + } + partitions := make(map[string]partition, 0) + for _, mount := range mounts { + if !strings.HasPrefix(mount.Fstype, "ext") && mount.Fstype != "btrfs" { + continue + } + // Avoid bind mounts. + if _, ok := partitions[mount.Source]; ok { + continue + } + partitions[mount.Source] = partition{mount.Mountpoint, uint(mount.Major), uint(mount.Minor)} + } + glog.Infof("Filesystem partitions: %+v", partitions) + return &RealFsInfo{partitions}, nil +} + +func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) { + filesystems := make([]Fs, 0) + deviceSet := make(map[string]struct{}) + diskStatsMap, err := getDiskStatsMap("/proc/diskstats") + if err != nil { + return nil, err + } + for device, partition := range self.partitions { + _, hasMount := mountSet[partition.mountpoint] + _, hasDevice := deviceSet[device] + if mountSet == nil || (hasMount && !hasDevice) { + total, free, err := getVfsStats(partition.mountpoint) + if err != nil { + glog.Errorf("Statvfs failed. Error: %v", err) + } else { + deviceSet[device] = struct{}{} + deviceInfo := DeviceInfo{ + Device: device, + Major: uint(partition.major), + Minor: uint(partition.minor), + } + fs := Fs{deviceInfo, total, free, diskStatsMap[device]} + filesystems = append(filesystems, fs) + } + } + } + return filesystems, nil +} + +func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) { + diskStatsMap := make(map[string]DiskStats) + file, err := os.Open(diskStatsFile) + if err != nil { + if os.IsNotExist(err) { + glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile) + return diskStatsMap, nil + } + return nil, err + } + + defer file.Close() + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + words := strings.Fields(line) + if !partitionRegex.MatchString(words[2]) { + continue + } + // 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330 + deviceName := path.Join("/dev", words[2]) + wordLength := len(words) + offset := 3 + var stats = make([]uint64, wordLength-offset) + if len(stats) < 11 { + return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats") + } + var error error + for i := offset; i < wordLength; i++ { + stats[i-offset], error = strconv.ParseUint(words[i], 10, 64) + if error != nil { + return nil, error + } + } + diskStats := DiskStats{ + ReadsCompleted: stats[0], + ReadsMerged: stats[1], + SectorsRead: stats[2], + ReadTime: stats[3], + WritesCompleted: stats[4], + WritesMerged: stats[5], + SectorsWritten: stats[6], + WriteTime: stats[7], + IoInProgress: stats[8], + IoTime: stats[9], + WeightedIoTime: stats[10], + } + diskStatsMap[deviceName] = diskStats + } + return diskStatsMap, nil +} + +func (self *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) { + return 
self.GetFsInfoForPath(nil) +} + +func major(devNumber uint64) uint { + return uint((devNumber >> 8) & 0xfff) +} + +func minor(devNumber uint64) uint { + return uint((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)) +} + +func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) { + var buf syscall.Stat_t + err := syscall.Stat(dir, &buf) + if err != nil { + return nil, fmt.Errorf("stat failed on %s with error: %s", dir, err) + } + major := major(buf.Dev) + minor := minor(buf.Dev) + for device, partition := range self.partitions { + if partition.major == major && partition.minor == minor { + return &DeviceInfo{device, major, minor}, nil + } + } + return nil, fmt.Errorf("could not find device with major: %d, minor: %d in cached partitions map", major, minor) +} + +func (self *RealFsInfo) GetDirUsage(dir string) (uint64, error) { + out, err := exec.Command("du", "-s", dir).CombinedOutput() + if err != nil { + return 0, fmt.Errorf("du command failed on %s with output %s - %s", dir, out, err) + } + usageInKb, err := strconv.ParseUint(strings.Fields(string(out))[0], 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse 'du' output %s - %s", out, err) + } + return usageInKb * 1024, nil +} + +func getVfsStats(path string) (total uint64, free uint64, err error) { + _p0, err := syscall.BytePtrFromString(path) + if err != nil { + return 0, 0, err + } + res, err := C.getBytesFree((*C.char)(unsafe.Pointer(_p0)), (*_Ctype_ulonglong)(unsafe.Pointer(&free))) + if res != 0 { + return 0, 0, err + } + res, err = C.getBytesTotal((*C.char)(unsafe.Pointer(_p0)), (*_Ctype_ulonglong)(unsafe.Pointer(&total))) + if res != 0 { + return 0, 0, err + } + return total, free, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs_test.go new file mode 100644 index 00000000000..2071f02d16b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/fs_test.go @@ -0,0 +1,78 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
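// Editor's note: the sketch below is an illustrative, standalone usage example
// of the fs package defined in fs.go above; it is not part of the vendored
// cadvisor patch. The package and function names are invented for the example.
package fsexample

import (
	"fmt"

	"github.com/google/cadvisor/fs"
)

// printGlobalFsUsage prints used and total bytes for every partition that
// cadvisor tracks (ext* and btrfs mounts, per NewFsInfo above).
func printGlobalFsUsage() error {
	fsInfo, err := fs.NewFsInfo()
	if err != nil {
		return err
	}
	filesystems, err := fsInfo.GetGlobalFsInfo()
	if err != nil {
		return err
	}
	for _, partition := range filesystems {
		fmt.Printf("%s: %d of %d bytes used\n",
			partition.Device, partition.Capacity-partition.Free, partition.Capacity)
	}
	return nil
}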
+ +package fs + +import ( + "testing" +) + +func TestGetDiskStatsMap(t *testing.T) { + diskStatsMap, err := getDiskStatsMap("test_resources/diskstats") + if err != nil { + t.Errorf("Error calling getDiskStatMap %s", err) + } + if len(diskStatsMap) != 30 { + t.Errorf("diskStatsMap %+v not valid", diskStatsMap) + } + keySet := map[string]string{ + "/dev/sda": "/dev/sda", + "/dev/sdb": "/dev/sdb", + "/dev/sdc": "/dev/sdc", + "/dev/sdd": "/dev/sdd", + "/dev/sde": "/dev/sde", + "/dev/sdf": "/dev/sdf", + "/dev/sdg": "/dev/sdg", + "/dev/sdh": "/dev/sdh", + "/dev/sdb1": "/dev/sdb1", + "/dev/sdb2": "/dev/sdb2", + "/dev/sda1": "/dev/sda1", + "/dev/sda2": "/dev/sda2", + "/dev/sdc1": "/dev/sdc1", + "/dev/sdc2": "/dev/sdc2", + "/dev/sdc3": "/dev/sdc3", + "/dev/sdc4": "/dev/sdc4", + "/dev/sdd1": "/dev/sdd1", + "/dev/sdd2": "/dev/sdd2", + "/dev/sdd3": "/dev/sdd3", + "/dev/sdd4": "/dev/sdd4", + "/dev/sde1": "/dev/sde1", + "/dev/sde2": "/dev/sde2", + "/dev/sdf1": "/dev/sdf1", + "/dev/sdf2": "/dev/sdf2", + "/dev/sdg1": "/dev/sdg1", + "/dev/sdg2": "/dev/sdg2", + "/dev/sdh1": "/dev/sdh1", + "/dev/sdh2": "/dev/sdh2", + "/dev/dm-0": "/dev/dm-0", + "/dev/dm-1": "/dev/dm-1", + } + + for device := range diskStatsMap { + if _, ok := keySet[device]; !ok { + t.Errorf("Cannot find device %s", device) + } + delete(keySet, device) + } + if len(keySet) != 0 { + t.Errorf("diskStatsMap %+v contains illegal keys %+v", diskStatsMap, keySet) + } +} + +func TestFileNotExist(t *testing.T) { + _, err := getDiskStatsMap("/file_does_not_exist") + if err != nil { + t.Fatalf("getDiskStatsMap must not error for absent file: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/statvfs.c b/Godeps/_workspace/src/github.com/google/cadvisor/fs/statvfs.c new file mode 100644 index 00000000000..6961df4f68a --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/statvfs.c @@ -0,0 +1,23 @@ +// +build cgo + +#include <sys/statvfs.h> + +int getBytesFree(const char *path, unsigned long long *bytes) { + struct statvfs buf; + int res; + if ((res = statvfs(path, &buf)) && res != 0) { + return -1; + } + *bytes = buf.f_frsize * buf.f_bfree; + return 0; +} + +int getBytesTotal(const char *path, unsigned long long *bytes) { + struct statvfs buf; + int res; + if ((res = statvfs(path, &buf)) && res != 0) { + return -1; + } + *bytes = buf.f_frsize * buf.f_blocks; + return 0; +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/test_resources/diskstats b/Godeps/_workspace/src/github.com/google/cadvisor/fs/test_resources/diskstats new file mode 100644 index 00000000000..ee4d233b0d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/test_resources/diskstats @@ -0,0 +1,54 @@ + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 16 sdb 931 1157 7601
960 2 0 16 0 0 919 960 + 8 17 sdb1 477 1147 3895 271 1 0 8 0 0 271 271 + 8 18 sdb2 395 0 3154 326 1 0 8 0 0 326 326 + 8 0 sda 931 1157 7601 1065 2 0 16 0 0 873 1065 + 8 1 sda1 477 1147 3895 419 1 0 8 0 0 419 419 + 8 2 sda2 395 0 3154 328 1 0 8 0 0 328 328 + 8 32 sdc 12390 470 457965 36363 72184 244851 9824537 5359169 0 607738 5437210 + 8 33 sdc1 10907 221 446193 34366 72173 244851 9824499 5359063 0 606972 5435214 + 8 34 sdc2 650 249 5120 901 7 0 22 93 0 956 994 + 8 35 sdc3 264 0 2106 380 1 0 8 0 0 380 380 + 8 36 sdc4 392 0 3130 476 1 0 8 0 0 475 475 + 8 48 sdd 3371 134 58909 18327 73997 243043 9824537 4532714 0 594248 4602162 + 8 49 sdd1 2498 134 51977 17192 73986 243043 9824499 4532600 0 593618 4600885 + 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330 + 8 51 sdd3 264 0 2106 328 1 0 8 0 0 328 328 + 8 52 sdd4 392 0 3130 373 1 0 8 1 0 374 374 + 8 64 sde 931 1157 7601 768 2 0 16 0 0 632 768 + 8 65 sde1 477 1147 3895 252 1 0 8 0 0 252 252 + 8 66 sde2 395 0 3154 281 1 0 8 0 0 281 281 + 8 80 sdf 931 1157 7601 936 2 0 16 0 0 717 936 + 8 81 sdf1 477 1147 3895 382 1 0 8 0 0 382 382 + 8 82 sdf2 395 0 3154 321 1 0 8 0 0 321 321 + 8 96 sdg 931 1157 7601 858 2 0 16 0 0 804 858 + 8 97 sdg1 477 1147 3895 244 1 0 8 0 0 244 244 + 8 98 sdg2 395 0 3154 299 1 0 8 0 0 299 299 + 8 112 sdh 931 1157 7601 895 2 0 16 0 0 841 895 + 8 113 sdh1 477 1147 3895 264 1 0 8 0 0 264 264 + 8 114 sdh2 395 0 3154 311 1 0 8 0 0 311 311 + 252 0 dm-0 1251094 0 108121362 21287644 111848 0 52908472 22236936 0 4838500 43524784 + 252 1 dm-1 58415638 0 2682446960 1719953592 20048040 0 543988240 1975572544 0 262085340 3695556828 diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/fs/types.go b/Godeps/_workspace/src/github.com/google/cadvisor/fs/types.go new file mode 100644 index 00000000000..9cba902bb2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/fs/types.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +type DeviceInfo struct { + Device string + Major uint + Minor uint +} + +type Fs struct { + DeviceInfo + Capacity uint64 + Free uint64 + DiskStats DiskStats +} + +type DiskStats struct { + ReadsCompleted uint64 + ReadsMerged uint64 + SectorsRead uint64 + ReadTime uint64 + WritesCompleted uint64 + WritesMerged uint64 + SectorsWritten uint64 + WriteTime uint64 + IoInProgress uint64 + IoTime uint64 + WeightedIoTime uint64 +} + +type FsInfo interface { + // Returns capacity and free space, in bytes, of all the ext2, ext3, ext4 filesystems on the host. + GetGlobalFsInfo() ([]Fs, error) + + // Returns capacity and free space, in bytes, of the set of mounts passed. + GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) + + // Returns number of bytes occupied by 'dir'. + GetDirUsage(dir string) (uint64, error) + + // Returns the block device info of the filesystem on which 'dir' resides. 
+ GetDirFsDevice(dir string) (*DeviceInfo, error) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go new file mode 100644 index 00000000000..65d85c2e629 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go @@ -0,0 +1,99 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "time" +) + +type CpuSpec struct { + // Requested cpu shares. Default is 1024. + Limit uint64 `json:"limit"` + // Requested cpu hard limit. Default is unlimited (0). + // Units: milli-cpus. + MaxLimit uint64 `json:"max_limit"` + // Cpu affinity mask. + // TODO(rjnagal): Add a library to convert mask string to set of cpu bitmask. + Mask string `json:"mask,omitempty"` +} + +type MemorySpec struct { + // The amount of memory requested. Default is unlimited (-1). + // Units: bytes. + Limit uint64 `json:"limit,omitempty"` + + // The amount of guaranteed memory. Default is 0. + // Units: bytes. + Reservation uint64 `json:"reservation,omitempty"` + + // The amount of swap space requested. Default is unlimited (-1). + // Units: bytes. + SwapLimit uint64 `json:"swap_limit,omitempty"` +} + +type ContainerSpec struct { + // Time at which the container was created. + CreationTime time.Time `json:"creation_time,omitempty"` + + HasCpu bool `json:"has_cpu"` + Cpu CpuSpec `json:"cpu,omitempty"` + + HasMemory bool `json:"has_memory"` + Memory MemorySpec `json:"memory,omitempty"` +} + +type Percentiles struct { + // Indicates whether the stats are present or not. + // If true, values below do not have any data. + Present bool `json:"present"` + // Average over the collected sample. + Mean uint64 `json:"mean"` + // Max seen over the collected sample. + Max uint64 `json:"max"` + // 90th percentile over the collected sample. + Ninety uint64 `json:"ninety"` +} + +type Usage struct { + // Indicates amount of data available [0-100]. + // If we have data for half a day, we'll still process DayUsage, + // but set PercentComplete to 50. + PercentComplete int32 `json:"percent_complete"` + // Mean, Max, and 90p cpu rate value in milliCpus/seconds. Converted to milliCpus to avoid floats. + Cpu Percentiles `json:"cpu"` + // Mean, Max, and 90p memory size in bytes. + Memory Percentiles `json:"memory"` +} + +// latest sample collected for a container. +type InstantUsage struct { + // cpu rate in cpu milliseconds/second. + Cpu uint64 `json:"cpu"` + // Memory usage in bytes. + Memory uint64 `json:"memory"` +} + +type DerivedStats struct { + // Time of generation of these stats. + Timestamp time.Time `json:"timestamp"` + // Latest instantaneous sample. + LatestUsage InstantUsage `json:"latest_usage"` + // Percentiles in last observed minute. + MinuteUsage Usage `json:"minute_usage"` + // Percentile in last hour. + HourUsage Usage `json:"hour_usage"` + // Percentile in last day. 
+ DayUsage Usage `json:"day_usage"` +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go new file mode 100644 index 00000000000..59a7d5ef8d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go @@ -0,0 +1,341 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "flag" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/docker/docker/pkg/units" + "github.com/golang/glog" + "github.com/google/cadvisor/container" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/storage/memory" + "github.com/google/cadvisor/summary" + "github.com/google/cadvisor/utils/cpuload" +) + +// Housekeeping interval. +var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings") +var maxHousekeepingInterval = flag.Duration("max_housekeeping_interval", 60*time.Second, "Largest interval to allow between container housekeepings") +var allowDynamicHousekeeping = flag.Bool("allow_dynamic_housekeeping", true, "Whether to allow the housekeeping interval to be dynamic") + +// Decay value used for load average smoothing. Interval length of 10 seconds is used. +var loadDecay = math.Exp(float64(-1 * (*HousekeepingInterval).Seconds() / 10)) + +type containerInfo struct { + info.ContainerReference + Subcontainers []info.ContainerReference + Spec info.ContainerSpec +} + +type containerData struct { + handler container.ContainerHandler + info containerInfo + memoryStorage *memory.InMemoryStorage + lock sync.Mutex + loadReader cpuload.CpuLoadReader + summaryReader *summary.StatsSummary + loadAvg float64 // smoothed load average seen so far. + housekeepingInterval time.Duration + lastUpdatedTime time.Time + lastErrorTime time.Time + + // Whether to log the usage of this container when it is updated. + logUsage bool + + // Tells the container to stop. + stop chan bool +} + +func (c *containerData) Start() error { + go c.housekeeping() + return nil +} + +func (c *containerData) Stop() error { + c.stop <- true + return nil +} + +func (c *containerData) allowErrorLogging() bool { + if time.Since(c.lastErrorTime) > time.Minute { + c.lastErrorTime = time.Now() + return true + } + return false +} + +func (c *containerData) GetInfo() (*containerInfo, error) { + // Get spec and subcontainers. + if time.Since(c.lastUpdatedTime) > 5*time.Second { + err := c.updateSpec() + if err != nil { + return nil, err + } + err = c.updateSubcontainers() + if err != nil { + return nil, err + } + c.lastUpdatedTime = time.Now() + } + // Make a copy of the info for the user. 
+ c.lock.Lock() + defer c.lock.Unlock() + return &c.info, nil +} + +func (c *containerData) DerivedStats() (v2.DerivedStats, error) { + if c.summaryReader == nil { + return v2.DerivedStats{}, fmt.Errorf("derived stats not enabled for container %q", c.info.Name) + } + return c.summaryReader.DerivedStats() +} + +func newContainerData(containerName string, memoryStorage *memory.InMemoryStorage, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool) (*containerData, error) { + if memoryStorage == nil { + return nil, fmt.Errorf("nil memory storage") + } + if handler == nil { + return nil, fmt.Errorf("nil container handler") + } + ref, err := handler.ContainerReference() + if err != nil { + return nil, err + } + + cont := &containerData{ + handler: handler, + memoryStorage: memoryStorage, + housekeepingInterval: *HousekeepingInterval, + loadReader: loadReader, + logUsage: logUsage, + loadAvg: -1.0, // negative value indicates uninitialized. + stop: make(chan bool, 1), + } + cont.info.ContainerReference = ref + + err = cont.updateSpec() + if err != nil { + return nil, err + } + cont.summaryReader, err = summary.New(cont.info.Spec) + if err != nil { + cont.summaryReader = nil + glog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err) + } + + return cont, nil +} + +// Determine when the next housekeeping should occur. +func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time { + if *allowDynamicHousekeeping { + var empty time.Time + stats, err := self.memoryStorage.RecentStats(self.info.Name, empty, empty, 2) + if err != nil { + if self.allowErrorLogging() { + glog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err) + } + } else if len(stats) == 2 { + // TODO(vishnuk): Use no processes as a signal. + // Raise the interval if usage hasn't changed in the last housekeeping. + if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < *maxHousekeepingInterval) { + self.housekeepingInterval *= 2 + if self.housekeepingInterval > *maxHousekeepingInterval { + self.housekeepingInterval = *maxHousekeepingInterval + } + glog.V(3).Infof("Raising housekeeping interval for %q to %v", self.info.Name, self.housekeepingInterval) + } else if self.housekeepingInterval != *HousekeepingInterval { + // Lower interval back to the baseline. + self.housekeepingInterval = *HousekeepingInterval + glog.V(3).Infof("Lowering housekeeping interval for %q to %v", self.info.Name, self.housekeepingInterval) + } + } + } + + return lastHousekeeping.Add(self.housekeepingInterval) +} + +func (c *containerData) housekeeping() { + // Long housekeeping is either 100ms or half of the housekeeping interval. + longHousekeeping := 100 * time.Millisecond + if *HousekeepingInterval/2 < longHousekeeping { + longHousekeeping = *HousekeepingInterval / 2 + } + + // Housekeep every second. + glog.Infof("Start housekeeping for container %q\n", c.info.Name) + lastHousekeeping := time.Now() + for { + select { + case <-c.stop: + // Stop housekeeping when signaled. + return + default: + // Perform housekeeping. + start := time.Now() + c.housekeepingTick() + + // Log if housekeeping took too long. + duration := time.Since(start) + if duration >= longHousekeeping { + glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration) + } + } + + // Log usage if asked to do so. 
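		// (Editor's note, not part of the vendored source: the block below
		// averages CPU usage over the last numSamples stats by dividing the
		// summed cpu-nanosecond deltas by the elapsed wall-clock nanoseconds to
		// get cores, reports an instantaneous rate from the two most recent
		// samples, and prints the latest memory usage in human-readable units.)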
+ if c.logUsage { + const numSamples = 60 + var empty time.Time + stats, err := c.memoryStorage.RecentStats(c.info.Name, empty, empty, numSamples) + if err != nil { + if c.allowErrorLogging() { + glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) + } + } else if len(stats) < numSamples { + // Ignore, not enough stats yet. + } else { + usageCpuNs := uint64(0) + for i := range stats { + if i > 0 { + usageCpuNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total) + } + } + usageMemory := stats[numSamples-1].Memory.Usage + + instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds()) + usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()) + usageInHuman := units.HumanSize(float64(usageMemory)) + glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) + } + } + + // Schedule the next housekeeping. Sleep until that time. + nextHousekeeping := c.nextHousekeeping(lastHousekeeping) + if time.Now().Before(nextHousekeeping) { + time.Sleep(nextHousekeeping.Sub(time.Now())) + } + lastHousekeeping = nextHousekeeping + } +} + +func (c *containerData) housekeepingTick() { + err := c.updateStats() + if err != nil { + if c.allowErrorLogging() { + glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err) + } + } +} + +func (c *containerData) updateSpec() error { + spec, err := c.handler.GetSpec() + if err != nil { + // Ignore errors if the container is dead. + if !c.handler.Exists() { + return nil + } + return err + } + c.lock.Lock() + defer c.lock.Unlock() + c.info.Spec = spec + return nil +} + +// Calculate new smoothed load average using the new sample of runnable threads. +// The decay used ensures that the load will stabilize on a new constant value within +// 10 seconds. +func (c *containerData) updateLoad(newLoad uint64) { + if c.loadAvg < 0 { + c.loadAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization. + } else { + c.loadAvg = c.loadAvg*loadDecay + float64(newLoad)*(1.0-loadDecay) + } + glog.V(3).Infof("New load for %q: %v. latest sample: %d", c.info.Name, c.loadAvg, newLoad) +} + +func (c *containerData) updateStats() error { + stats, statsErr := c.handler.GetStats() + if statsErr != nil { + // Ignore errors if the container is dead. + if !c.handler.Exists() { + return nil + } + + // Stats may be partially populated, push those before we return an error. + statsErr = fmt.Errorf("%v, continuing to push stats", statsErr) + } + if stats == nil { + return statsErr + } + if c.loadReader != nil { + // TODO(vmarmol): Cache this path. + path, err := c.handler.GetCgroupPath("cpu") + if err == nil { + loadStats, err := c.loadReader.GetCpuLoad(c.info.Name, path) + if err != nil { + return fmt.Errorf("failed to get load stat for %q - path %q, error %s", c.info.Name, path, err) + } + stats.TaskStats = loadStats + c.updateLoad(loadStats.NrRunning) + // convert to 'milliLoad' to avoid floats and preserve precision. + stats.Cpu.LoadAverage = int32(c.loadAvg * 1000) + } + } + if c.summaryReader != nil { + err := c.summaryReader.AddSample(*stats) + if err != nil { + // Ignore summary errors for now. 
+ glog.V(2).Infof("failed to add summary stats for %q: %v", c.info.Name, err) + } + } + ref, err := c.handler.ContainerReference() + if err != nil { + // Ignore errors if the container is dead. + if !c.handler.Exists() { + return nil + } + return err + } + err = c.memoryStorage.AddStats(ref, stats) + if err != nil { + return err + } + return statsErr +} + +func (c *containerData) updateSubcontainers() error { + var subcontainers info.ContainerReferenceSlice + subcontainers, err := c.handler.ListContainers(container.ListSelf) + if err != nil { + // Ignore errors if the container is dead. + if !c.handler.Exists() { + return nil + } + return err + } + sort.Sort(subcontainers) + c.lock.Lock() + defer c.lock.Unlock() + c.info.Subcontainers = subcontainers + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go new file mode 100644 index 00000000000..bd73a1199c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go @@ -0,0 +1,204 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Per-container manager. + +package manager + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/google/cadvisor/container" + info "github.com/google/cadvisor/info/v1" + itest "github.com/google/cadvisor/info/v1/test" + "github.com/google/cadvisor/storage/memory" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const containerName = "/container" + +// Create a containerData instance for a test. +func setupContainerData(t *testing.T, spec info.ContainerSpec) (*containerData, *container.MockContainerHandler, *memory.InMemoryStorage) { + mockHandler := container.NewMockContainerHandler(containerName) + mockHandler.On("GetSpec").Return( + spec, + nil, + ) + memoryStorage := memory.New(60, nil) + ret, err := newContainerData(containerName, memoryStorage, mockHandler, nil, false) + if err != nil { + t.Fatal(err) + } + return ret, mockHandler, memoryStorage +} + +// Create a containerData instance for a test and add a default GetSpec mock. 
+func newTestContainerData(t *testing.T) (*containerData, *container.MockContainerHandler, *memory.InMemoryStorage) { + spec := itest.GenerateRandomContainerSpec(4) + ret, mockHandler, memoryStorage := setupContainerData(t, spec) + return ret, mockHandler, memoryStorage +} + +func TestUpdateSubcontainers(t *testing.T) { + subcontainers := []info.ContainerReference{ + {Name: "/container/ee0103"}, + {Name: "/container/abcd"}, + {Name: "/container/something"}, + } + cd, mockHandler, _ := newTestContainerData(t) + mockHandler.On("ListContainers", container.ListSelf).Return( + subcontainers, + nil, + ) + + err := cd.updateSubcontainers() + if err != nil { + t.Fatal(err) + } + + if len(cd.info.Subcontainers) != len(subcontainers) { + t.Errorf("Received %v subcontainers, should be %v", len(cd.info.Subcontainers), len(subcontainers)) + } + + for _, sub := range cd.info.Subcontainers { + found := false + for _, sub2 := range subcontainers { + if sub.Name == sub2.Name { + found = true + } + } + if !found { + t.Errorf("Received unknown sub container %v", sub) + } + } + + mockHandler.AssertExpectations(t) +} + +func TestUpdateSubcontainersWithError(t *testing.T) { + cd, mockHandler, _ := newTestContainerData(t) + mockHandler.On("ListContainers", container.ListSelf).Return( + []info.ContainerReference{}, + fmt.Errorf("some error"), + ) + mockHandler.On("Exists").Return(true) + + assert.NotNil(t, cd.updateSubcontainers()) + assert.Empty(t, cd.info.Subcontainers, "subcontainers should not be populated on failure") + mockHandler.AssertExpectations(t) +} + +func TestUpdateSubcontainersWithErrorOnDeadContainer(t *testing.T) { + cd, mockHandler, _ := newTestContainerData(t) + mockHandler.On("ListContainers", container.ListSelf).Return( + []info.ContainerReference{}, + fmt.Errorf("some error"), + ) + mockHandler.On("Exists").Return(false) + + assert.Nil(t, cd.updateSubcontainers()) + mockHandler.AssertExpectations(t) +} + +func checkNumStats(t *testing.T, memoryStorage *memory.InMemoryStorage, numStats int) { + var empty time.Time + stats, err := memoryStorage.RecentStats(containerName, empty, empty, -1) + require.Nil(t, err) + assert.Len(t, stats, numStats) +} + +func TestUpdateStats(t *testing.T) { + statsList := itest.GenerateRandomStats(1, 4, 1*time.Second) + stats := statsList[0] + + cd, mockHandler, memoryStorage := newTestContainerData(t) + mockHandler.On("GetStats").Return( + stats, + nil, + ) + + err := cd.updateStats() + if err != nil { + t.Fatal(err) + } + + checkNumStats(t, memoryStorage, 1) + mockHandler.AssertExpectations(t) +} + +func TestUpdateSpec(t *testing.T) { + spec := itest.GenerateRandomContainerSpec(4) + cd, mockHandler, _ := newTestContainerData(t) + mockHandler.On("GetSpec").Return( + spec, + nil, + ) + + err := cd.updateSpec() + if err != nil { + t.Fatal(err) + } + + mockHandler.AssertExpectations(t) +} + +func TestGetInfo(t *testing.T) { + spec := itest.GenerateRandomContainerSpec(4) + subcontainers := []info.ContainerReference{ + {Name: "/container/ee0103"}, + {Name: "/container/abcd"}, + {Name: "/container/something"}, + } + cd, mockHandler, _ := setupContainerData(t, spec) + mockHandler.On("ListContainers", container.ListSelf).Return( + subcontainers, + nil, + ) + mockHandler.Aliases = []string{"a1", "a2"} + + info, err := cd.GetInfo() + if err != nil { + t.Fatal(err) + } + + mockHandler.AssertExpectations(t) + + if len(info.Subcontainers) != len(subcontainers) { + t.Errorf("Received %v subcontainers, should be %v", len(info.Subcontainers), len(subcontainers)) + } + + for _, sub 
:= range info.Subcontainers { + found := false + for _, sub2 := range subcontainers { + if sub.Name == sub2.Name { + found = true + } + } + if !found { + t.Errorf("Received unknown sub container %v", sub) + } + } + + if !reflect.DeepEqual(spec, info.Spec) { + t.Errorf("received wrong container spec") + } + + if info.Name != mockHandler.Name { + t.Errorf("received wrong container name: received %v; should be %v", info.Name, mockHandler.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go new file mode 100644 index 00000000000..8a0a659209c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go @@ -0,0 +1,351 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" + "syscall" + + dclient "github.com/fsouza/go-dockerclient" + "github.com/golang/glog" + "github.com/google/cadvisor/container/docker" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" + "github.com/google/cadvisor/utils/sysfs" + "github.com/google/cadvisor/utils/sysinfo" + version "github.com/google/cadvisor/version" +) + +var cpuRegExp = regexp.MustCompile("processor\\t*: +([0-9]+)") +var coreRegExp = regexp.MustCompile("core id\\t*: +([0-9]+)") +var nodeRegExp = regexp.MustCompile("physical id\\t*: +([0-9]+)") +var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)") +var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB") + +var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.") + +func getClockSpeed(procInfo []byte) (uint64, error) { + // First look through sys to find a max supported cpu frequency. 
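+	// cpufreq exposes cpuinfo_max_freq in kHz, so the value read here is returned as-is;
+	// the /proc/cpuinfo fallback below converts MHz to kHz to match.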
+ const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" + if utils.FileExists(maxFreqFile) { + val, err := ioutil.ReadFile(maxFreqFile) + if err != nil { + return 0, err + } + var maxFreq uint64 + n, err := fmt.Sscanf(string(val), "%d", &maxFreq) + if err != nil || n != 1 { + return 0, fmt.Errorf("could not parse frequency %q", val) + } + return maxFreq, nil + } + // Fall back to /proc/cpuinfo + matches := CpuClockSpeedMHz.FindSubmatch(procInfo) + if len(matches) != 2 { + return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo)) + } + speed, err := strconv.ParseFloat(string(matches[1]), 64) + if err != nil { + return 0, err + } + // Convert to kHz + return uint64(speed * 1000), nil +} + +func getMemoryCapacity(b []byte) (int64, error) { + matches := memoryCapacityRegexp.FindSubmatch(b) + if len(matches) != 2 { + return -1, fmt.Errorf("failed to find memory capacity in output: %q", string(b)) + } + m, err := strconv.ParseInt(string(matches[1]), 10, 64) + if err != nil { + return -1, err + } + + // Convert to bytes. + return m * 1024, err +} + +func extractValue(s string, r *regexp.Regexp) (bool, int, error) { + matches := r.FindSubmatch([]byte(s)) + if len(matches) == 2 { + val, err := strconv.ParseInt(string(matches[1]), 10, 32) + if err != nil { + return true, -1, err + } + return true, int(val), nil + } + return false, -1, nil +} + +func findNode(nodes []info.Node, id int) (bool, int) { + for i, n := range nodes { + if n.Id == id { + return true, i + } + } + return false, -1 +} + +func addNode(nodes *[]info.Node, id int) (int, error) { + var idx int + if id == -1 { + // Some VMs don't fill topology data. Export single package. + id = 0 + } + + ok, idx := findNode(*nodes, id) + if !ok { + // New node + node := info.Node{Id: id} + // Add per-node memory information. + meminfo := fmt.Sprintf("/sys/devices/system/node/node%d/meminfo", id) + out, err := ioutil.ReadFile(meminfo) + // Ignore if per-node info is not available. + if err == nil { + m, err := getMemoryCapacity(out) + if err != nil { + return -1, err + } + node.Memory = uint64(m) + } + *nodes = append(*nodes, node) + idx = len(*nodes) - 1 + } + return idx, nil +} + +func getTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) { + nodes := []info.Node{} + numCores := 0 + lastThread := -1 + lastCore := -1 + lastNode := -1 + for _, line := range strings.Split(cpuinfo, "\n") { + ok, val, err := extractValue(line, cpuRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse cpu info from %q: %v", line, err) + } + if ok { + thread := val + numCores++ + if lastThread != -1 { + // New cpu section. Save last one. 
+ nodeIdx, err := addNode(&nodes, lastNode) + if err != nil { + return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) + } + nodes[nodeIdx].AddThread(lastThread, lastCore) + lastCore = -1 + lastNode = -1 + } + lastThread = thread + } + ok, val, err = extractValue(line, coreRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse core info from %q: %v", line, err) + } + if ok { + lastCore = val + } + ok, val, err = extractValue(line, nodeRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse node info from %q: %v", line, err) + } + if ok { + lastNode = val + } + } + nodeIdx, err := addNode(&nodes, lastNode) + if err != nil { + return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) + } + nodes[nodeIdx].AddThread(lastThread, lastCore) + if numCores < 1 { + return nil, numCores, fmt.Errorf("could not detect any cores") + } + for idx, node := range nodes { + caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0]) + if err != nil { + return nil, -1, fmt.Errorf("failed to get cache information for node %d: %v", node.Id, err) + } + numThreadsPerCore := len(node.Cores[0].Threads) + numThreadsPerNode := len(node.Cores) * numThreadsPerCore + for _, cache := range caches { + c := info.Cache{ + Size: cache.Size, + Level: cache.Level, + Type: cache.Type, + } + if cache.Cpus == numThreadsPerNode && cache.Level > 2 { + // Add a node-level cache. + nodes[idx].AddNodeCache(c) + } else if cache.Cpus == numThreadsPerCore { + // Add to each core. + nodes[idx].AddPerCoreCache(c) + } + // Ignore unknown caches. + } + } + return nodes, numCores, nil +} + +func getMachineID() string { + if len(*machineIdFilePath) == 0 { + return "" + } + for _, file := range strings.Split(*machineIdFilePath, ",") { + id, err := ioutil.ReadFile(file) + if err == nil { + return strings.TrimSpace(string(id)) + } + } + glog.Infof("Couldn't collect machine-id from any of the files in %q", *machineIdFilePath) + return "" +} + +func getMachineInfo(sysFs sysfs.SysFs) (*info.MachineInfo, error) { + cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") + clockSpeed, err := getClockSpeed(cpuinfo) + if err != nil { + return nil, err + } + + // Get the amount of usable memory from /proc/meminfo. 
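+	// MemTotal in /proc/meminfo is reported in kB; getMemoryCapacity converts it to bytes.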
+ out, err := ioutil.ReadFile("/proc/meminfo") + if err != nil { + return nil, err + } + + memoryCapacity, err := getMemoryCapacity(out) + if err != nil { + return nil, err + } + + fsInfo, err := fs.NewFsInfo() + if err != nil { + return nil, err + } + filesystems, err := fsInfo.GetGlobalFsInfo() + if err != nil { + return nil, err + } + + diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs) + if err != nil { + return nil, err + } + + netDevices, err := sysinfo.GetNetworkDevices(sysFs) + if err != nil { + return nil, err + } + + topology, numCores, err := getTopology(sysFs, string(cpuinfo)) + if err != nil { + return nil, err + } + + systemUUID, err := sysinfo.GetSystemUUID(sysFs) + if err != nil { + glog.Errorf("Failed to get system UUID: %v", err) + systemUUID = "" + } + + machineInfo := &info.MachineInfo{ + NumCores: numCores, + CpuFrequency: clockSpeed, + MemoryCapacity: memoryCapacity, + DiskMap: diskMap, + NetworkDevices: netDevices, + Topology: topology, + MachineID: getMachineID(), + SystemUUID: systemUUID, + } + + for _, fs := range filesystems { + machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Capacity: fs.Capacity}) + } + + return machineInfo, nil +} + +func getVersionInfo() (*info.VersionInfo, error) { + + kernel_version := getKernelVersion() + container_os := getContainerOsVersion() + docker_version := getDockerVersion() + + return &info.VersionInfo{ + KernelVersion: kernel_version, + ContainerOsVersion: container_os, + DockerVersion: docker_version, + CadvisorVersion: version.VERSION, + }, nil +} + +func getContainerOsVersion() string { + container_os := "Unknown" + os_release, err := ioutil.ReadFile("/etc/os-release") + if err == nil { + // We might be running in a busybox or some hand-crafted image. + // It's useful to know why cadvisor didn't come up. + for _, line := range strings.Split(string(os_release), "\n") { + parsed := strings.Split(line, "\"") + if len(parsed) == 3 && parsed[0] == "PRETTY_NAME=" { + container_os = parsed[1] + break + } + } + } + return container_os +} + +func getDockerVersion() string { + docker_version := "Unknown" + client, err := dclient.NewClient(*docker.ArgDockerEndpoint) + if err == nil { + version, err := client.Version() + if err == nil { + docker_version = version.Get("Version") + } + } + return docker_version +} + +func getKernelVersion() string { + uname := &syscall.Utsname{} + + if err := syscall.Uname(uname); err != nil { + return "Unknown" + } + + release := make([]byte, len(uname.Release)) + i := 0 + for _, c := range uname.Release { + release[i] = byte(c) + i++ + } + release = release[:bytes.IndexByte(release, 0)] + + return string(release) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go new file mode 100644 index 00000000000..b7edeed9532 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go @@ -0,0 +1,733 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Manager of cAdvisor-monitored containers. +package manager + +import ( + "flag" + "fmt" + "path" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/libcontainer/cgroups" + "github.com/golang/glog" + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/docker" + "github.com/google/cadvisor/container/raw" + "github.com/google/cadvisor/events" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/storage/memory" + "github.com/google/cadvisor/utils/cpuload" + "github.com/google/cadvisor/utils/oomparser" + "github.com/google/cadvisor/utils/sysfs" +) + +var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings") +var logCadvisorUsage = flag.Bool("log_cadvisor_usage", false, "Whether to log the usage of the cAdvisor container") + +// The Manager interface defines operations for starting a manager and getting +// container and machine information. +type Manager interface { + // Start the manager. + Start() error + + // Stops the manager. + Stop() error + + // Get information about a container. + GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) + + // Get information about all subcontainers of the specified container (includes self). + SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) + + // Gets all the Docker containers. Return is a map from full container name to ContainerInfo. + AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) + + // Gets information about a specific Docker container. The specified name is within the Docker namespace. + DockerContainer(dockerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) + + // Gets spec for a container. + GetContainerSpec(containerName string) (info.ContainerSpec, error) + + // Get derived stats for a container. + GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) + + // Get information about the machine. + GetMachineInfo() (*info.MachineInfo, error) + + // Get version information about different components we depend on. + GetVersionInfo() (*info.VersionInfo, error) + + // Get events streamed through passedChannel that fit the request. + WatchForEvents(request *events.Request, passedChannel chan *events.Event) error + + // Get past events that have been detected and that fit the request. + GetPastEvents(request *events.Request) (events.EventSlice, error) +} + +// New takes a memory storage and returns a new manager. +func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, error) { + if memoryStorage == nil { + return nil, fmt.Errorf("manager requires memory storage") + } + + // Detect the container we are running on. 
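+	// The cgroup cAdvisor itself runs in is recorded so that its own resource usage can
+	// optionally be logged (see the log_cadvisor_usage flag).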
+ selfContainer, err := cgroups.GetThisCgroupDir("cpu") + if err != nil { + return nil, err + } + glog.Infof("cAdvisor running in container: %q", selfContainer) + + newManager := &manager{ + containers: make(map[namespacedContainerName]*containerData), + quitChannels: make([]chan error, 0, 2), + memoryStorage: memoryStorage, + cadvisorContainer: selfContainer, + startupTime: time.Now(), + } + + machineInfo, err := getMachineInfo(sysfs) + if err != nil { + return nil, err + } + newManager.machineInfo = *machineInfo + glog.Infof("Machine: %+v", newManager.machineInfo) + + versionInfo, err := getVersionInfo() + if err != nil { + return nil, err + } + newManager.versionInfo = *versionInfo + glog.Infof("Version: %+v", newManager.versionInfo) + + newManager.eventHandler = events.NewEventManager() + + // Register Docker container factory. + err = docker.Register(newManager) + if err != nil { + glog.Errorf("Docker container factory registration failed: %v.", err) + } + + // Register the raw driver. + err = raw.Register(newManager) + if err != nil { + return nil, fmt.Errorf("registration of the raw container factory failed: %v", err) + } + + return newManager, nil +} + +// A namespaced container name. +type namespacedContainerName struct { + // The namespace of the container. Can be empty for the root namespace. + Namespace string + + // The name of the container in this namespace. + Name string +} + +type manager struct { + containers map[namespacedContainerName]*containerData + containersLock sync.RWMutex + memoryStorage *memory.InMemoryStorage + machineInfo info.MachineInfo + versionInfo info.VersionInfo + quitChannels []chan error + cadvisorContainer string + dockerContainersRegexp *regexp.Regexp + loadReader cpuload.CpuLoadReader + eventHandler events.EventManager + startupTime time.Time +} + +// Start the container manager. +func (self *manager) Start() error { + // TODO(rjnagal): Skip creating cpu load reader while we improve resource usage and accuracy. + if false { + // Create cpu load reader. + cpuLoadReader, err := cpuload.New() + if err != nil { + // TODO(rjnagal): Promote to warning once we support cpu load inside namespaces. + glog.Infof("Could not initialize cpu load reader: %s", err) + } else { + err = cpuLoadReader.Start() + if err != nil { + glog.Warning("Could not start cpu load stat collector: %s", err) + } else { + self.loadReader = cpuLoadReader + } + } + } + + // Create root and then recover all containers. + err := self.createContainer("/") + if err != nil { + return err + } + glog.Infof("Starting recovery of all containers") + err = self.detectSubcontainers("/") + if err != nil { + return err + } + glog.Infof("Recovery completed") + + // Watch for new container. + quitWatcher := make(chan error) + err = self.watchForNewContainers(quitWatcher) + if err != nil { + return err + } + self.quitChannels = append(self.quitChannels, quitWatcher) + err = self.watchForNewOoms() + if err != nil { + glog.Errorf("Failed to start OOM watcher, will not get OOM events: %v", err) + } + + // Look for new containers in the main housekeeping thread. + quitGlobalHousekeeping := make(chan error) + self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping) + go self.globalHousekeeping(quitGlobalHousekeeping) + + return nil +} + +func (self *manager) Stop() error { + // Stop and wait on all quit channels. + for i, c := range self.quitChannels { + // Send the exit signal and wait on the thread to exit (by closing the channel). 
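+		// Protocol: sending on the channel asks the goroutine to stop, and the goroutine
+		// replies on the same channel with its shutdown error (nil on success).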
+ c <- nil + err := <-c + if err != nil { + // Remove the channels that quit successfully. + self.quitChannels = self.quitChannels[i:] + return err + } + } + self.quitChannels = make([]chan error, 0, 2) + if self.loadReader != nil { + self.loadReader.Stop() + self.loadReader = nil + } + return nil +} + +func (self *manager) globalHousekeeping(quit chan error) { + // Long housekeeping is either 100ms or half of the housekeeping interval. + longHousekeeping := 100 * time.Millisecond + if *globalHousekeepingInterval/2 < longHousekeeping { + longHousekeeping = *globalHousekeepingInterval / 2 + } + + ticker := time.Tick(*globalHousekeepingInterval) + for { + select { + case t := <-ticker: + start := time.Now() + + // Check for new containers. + err := self.detectSubcontainers("/") + if err != nil { + glog.Errorf("Failed to detect containers: %s", err) + } + + // Log if housekeeping took too long. + duration := time.Since(start) + if duration >= longHousekeeping { + glog.V(1).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration) + } + case <-quit: + // Quit if asked to do so. + quit <- nil + glog.Infof("Exiting global housekeeping thread") + return + } + } +} + +func (self *manager) getContainerData(containerName string) (*containerData, error) { + var cont *containerData + var ok bool + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + + // Ensure we have the container. + cont, ok = self.containers[namespacedContainerName{ + Name: containerName, + }] + }() + if !ok { + return nil, fmt.Errorf("unknown container %q", containerName) + } + return cont, nil +} + +func (self *manager) GetContainerSpec(containerName string) (info.ContainerSpec, error) { + cont, err := self.getContainerData(containerName) + if err != nil { + return info.ContainerSpec{}, err + } + cinfo, err := cont.GetInfo() + if err != nil { + return info.ContainerSpec{}, err + } + return self.getAdjustedSpec(cinfo), nil +} + +func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec { + spec := cinfo.Spec + + // Set default value to an actual value + if spec.HasMemory { + // Memory.Limit is 0 means there's no limit + if spec.Memory.Limit == 0 { + spec.Memory.Limit = uint64(self.machineInfo.MemoryCapacity) + } + } + return spec +} + +// Get a container by name. +func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) { + cont, err := self.getContainerData(containerName) + if err != nil { + return nil, err + } + return self.containerDataToContainerInfo(cont, query) +} + +func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) { + // Get the info from the container. + cinfo, err := cont.GetInfo() + if err != nil { + return nil, err + } + + stats, err := self.memoryStorage.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats) + if err != nil { + return nil, err + } + + // Make a copy of the info for the user. 
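+	// Only the stats window requested by the query is attached; spec defaults (such as an
+	// unset memory limit) are resolved by getAdjustedSpec.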
+ ret := &info.ContainerInfo{ + ContainerReference: cinfo.ContainerReference, + Subcontainers: cinfo.Subcontainers, + Spec: self.getAdjustedSpec(cinfo), + Stats: stats, + } + return ret, nil +} + +func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) { + var containers []*containerData + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + containers = make([]*containerData, 0, len(self.containers)) + + // Get all the subcontainers of the specified container + matchedName := path.Join(containerName, "/") + for i := range self.containers { + name := self.containers[i].info.Name + if name == containerName || strings.HasPrefix(name, matchedName) { + containers = append(containers, self.containers[i]) + } + } + }() + + return self.containerDataSliceToContainerInfoSlice(containers, query) +} + +func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) { + var containers map[string]*containerData + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + containers = make(map[string]*containerData, len(self.containers)) + + // Get containers in the Docker namespace. + for name, cont := range self.containers { + if name.Namespace == docker.DockerNamespace { + containers[cont.info.Name] = cont + } + } + }() + + output := make(map[string]info.ContainerInfo, len(containers)) + for name, cont := range containers { + inf, err := self.containerDataToContainerInfo(cont, query) + if err != nil { + return nil, err + } + output[name] = *inf + } + return output, nil +} + +func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) { + var container *containerData = nil + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + + // Check for the container in the Docker container namespace. + cont, ok := self.containers[namespacedContainerName{ + Namespace: docker.DockerNamespace, + Name: containerName, + }] + if ok { + container = cont + } + }() + if container == nil { + return info.ContainerInfo{}, fmt.Errorf("unable to find Docker container %q", containerName) + } + + inf, err := self.containerDataToContainerInfo(container, query) + if err != nil { + return info.ContainerInfo{}, err + } + return *inf, nil +} + +func (self *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) { + if len(containers) == 0 { + return nil, fmt.Errorf("no containers found") + } + + // Get the info for each container. + output := make([]*info.ContainerInfo, 0, len(containers)) + for i := range containers { + cinfo, err := self.containerDataToContainerInfo(containers[i], query) + if err != nil { + // Skip containers with errors, we try to degrade gracefully. + continue + } + output = append(output, cinfo) + } + + return output, nil +} + +func (self *manager) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) { + var ok bool + var cont *containerData + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + cont, ok = self.containers[namespacedContainerName{Name: containerName}] + }() + if !ok { + return v2.DerivedStats{}, fmt.Errorf("unknown container %q", containerName) + } + return cont.DerivedStats() +} + +func (m *manager) GetMachineInfo() (*info.MachineInfo, error) { + // Copy and return the MachineInfo. 
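+	// machineInfo is populated once in New and not modified afterwards, so returning a
+	// pointer to it is effectively read-only.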
+ return &m.machineInfo, nil +} + +func (m *manager) GetVersionInfo() (*info.VersionInfo, error) { + return &m.versionInfo, nil +} + +// Create a container. +func (m *manager) createContainer(containerName string) error { + handler, err := container.NewContainerHandler(containerName) + if err != nil { + return err + } + logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer + cont, err := newContainerData(containerName, m.memoryStorage, handler, m.loadReader, logUsage) + if err != nil { + return err + } + + // Add to the containers map. + alreadyExists := func() bool { + m.containersLock.Lock() + defer m.containersLock.Unlock() + + namespacedName := namespacedContainerName{ + Name: containerName, + } + + // Check that the container didn't already exist. + _, ok := m.containers[namespacedName] + if ok { + return true + } + + // Add the container name and all its aliases. The aliases must be within the namespace of the factory. + m.containers[namespacedName] = cont + for _, alias := range cont.info.Aliases { + m.containers[namespacedContainerName{ + Namespace: cont.info.Namespace, + Name: alias, + }] = cont + } + + return false + }() + if alreadyExists { + return nil + } + glog.Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) + + contSpecs, err := cont.handler.GetSpec() + if err != nil { + return err + } + + if contSpecs.CreationTime.After(m.startupTime) { + contRef, err := cont.handler.ContainerReference() + if err != nil { + return err + } + + newEvent := &events.Event{ + ContainerName: contRef.Name, + EventData: contSpecs, + Timestamp: contSpecs.CreationTime, + EventType: events.TypeContainerCreation, + } + err = m.eventHandler.AddEvent(newEvent) + if err != nil { + return err + } + } + + // Start the container's housekeeping. + cont.Start() + + return nil +} + +func (m *manager) destroyContainer(containerName string) error { + m.containersLock.Lock() + defer m.containersLock.Unlock() + + namespacedName := namespacedContainerName{ + Name: containerName, + } + cont, ok := m.containers[namespacedName] + if !ok { + // Already destroyed, done. + return nil + } + + // Tell the container to stop. + err := cont.Stop() + if err != nil { + return err + } + + // Remove the container from our records (and all its aliases). + delete(m.containers, namespacedName) + for _, alias := range cont.info.Aliases { + delete(m.containers, namespacedContainerName{ + Namespace: cont.info.Namespace, + Name: alias, + }) + } + glog.Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) + + contRef, err := cont.handler.ContainerReference() + if err != nil { + return err + } + + newEvent := &events.Event{ + ContainerName: contRef.Name, + Timestamp: time.Now(), + EventType: events.TypeContainerDeletion, + } + err = m.eventHandler.AddEvent(newEvent) + if err != nil { + return err + } + return nil +} + +// Detect all containers that have been added or deleted from the specified container. +func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) { + m.containersLock.RLock() + defer m.containersLock.RUnlock() + + // Get all subcontainers recursively. 
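+	// The recursive listing from this container's handler is compared against the canonical
+	// names already tracked: entries not yet tracked are reported as added, tracked entries
+	// missing from the listing as removed.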
+ cont, ok := m.containers[namespacedContainerName{ + Name: containerName, + }] + if !ok { + return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName) + } + allContainers, err := cont.handler.ListContainers(container.ListRecursive) + if err != nil { + return nil, nil, err + } + allContainers = append(allContainers, info.ContainerReference{Name: containerName}) + + // Determine which were added and which were removed. + allContainersSet := make(map[string]*containerData) + for name, d := range m.containers { + // Only add the canonical name. + if d.info.Name == name.Name { + allContainersSet[name.Name] = d + } + } + + // Added containers + for _, c := range allContainers { + delete(allContainersSet, c.Name) + _, ok := m.containers[namespacedContainerName{ + Name: c.Name, + }] + if !ok { + added = append(added, c) + } + } + + // Removed ones are no longer in the container listing. + for _, d := range allContainersSet { + removed = append(removed, d.info.ContainerReference) + } + + return +} + +// Detect the existing subcontainers and reflect the setup here. +func (m *manager) detectSubcontainers(containerName string) error { + added, removed, err := m.getContainersDiff(containerName) + if err != nil { + return err + } + + // Add the new containers. + for _, cont := range added { + err = m.createContainer(cont.Name) + if err != nil { + glog.Errorf("Failed to create existing container: %s: %s", cont.Name, err) + } + } + + // Remove the old containers. + for _, cont := range removed { + err = m.destroyContainer(cont.Name) + if err != nil { + glog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err) + } + } + + return nil +} + +// Watches for new containers started in the system. Runs forever unless there is a setup error. +func (self *manager) watchForNewContainers(quit chan error) error { + var root *containerData + var ok bool + func() { + self.containersLock.RLock() + defer self.containersLock.RUnlock() + root, ok = self.containers[namespacedContainerName{ + Name: "/", + }] + }() + if !ok { + return fmt.Errorf("root container does not exist when watching for new containers") + } + + // Register for new subcontainers. + eventsChannel := make(chan container.SubcontainerEvent, 16) + err := root.handler.WatchSubcontainers(eventsChannel) + if err != nil { + return err + } + + // There is a race between starting the watch and new container creation so we do a detection before we read new containers. + err = self.detectSubcontainers("/") + if err != nil { + return err + } + + // Listen to events from the container handler. + go func() { + for { + select { + case event := <-eventsChannel: + switch { + case event.EventType == container.SubcontainerAdd: + err = self.createContainer(event.Name) + case event.EventType == container.SubcontainerDelete: + err = self.destroyContainer(event.Name) + } + if err != nil { + glog.Warning("Failed to process watch event: %v", err) + } + case <-quit: + // Stop processing events if asked to quit. 
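+					// The stop error is reported back on the quit channel; the goroutine
+					// returns only once the watch was stopped cleanly.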
+ err := root.handler.StopWatchingSubcontainers() + quit <- err + if err == nil { + glog.Infof("Exiting thread watching subcontainers") + return + } + } + } + }() + return nil +} + +func (self *manager) watchForNewOoms() error { + outStream := make(chan *oomparser.OomInstance, 10) + oomLog, err := oomparser.New() + if err != nil { + return err + } + err = oomLog.StreamOoms(outStream) + if err != nil { + return err + } + go func() { + for oomInstance := range outStream { + newEvent := &events.Event{ + ContainerName: oomInstance.ContainerName, + Timestamp: oomInstance.TimeOfDeath, + EventType: events.TypeOom, + EventData: oomInstance, + } + glog.V(1).Infof("Created an oom event: %v", newEvent) + err := self.eventHandler.AddEvent(newEvent) + if err != nil { + glog.Errorf("Failed to add event %v, got error: %v", newEvent, err) + } + } + }() + return nil +} + +// can be called by the api which will take events returned on the channel +func (self *manager) WatchForEvents(request *events.Request, passedChannel chan *events.Event) error { + return self.eventHandler.WatchEvents(passedChannel, request) +} + +// can be called by the api which will return all events satisfying the request +func (self *manager) GetPastEvents(request *events.Request) (events.EventSlice, error) { + return self.eventHandler.GetEvents(request) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_mock.go new file mode 100644 index 00000000000..655c4b2e667 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_mock.go @@ -0,0 +1,86 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package manager + +import ( + "github.com/google/cadvisor/events" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/mock" +) + +type ManagerMock struct { + mock.Mock +} + +func (c *ManagerMock) Start() error { + args := c.Called() + return args.Error(0) +} + +func (c *ManagerMock) Stop() error { + args := c.Called() + return args.Error(0) +} + +func (c *ManagerMock) GetContainerInfo(name string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) { + args := c.Called(name, query) + return args.Get(0).(*info.ContainerInfo), args.Error(1) +} + +func (c *ManagerMock) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) { + args := c.Called(containerName, query) + return args.Get(0).([]*info.ContainerInfo), args.Error(1) +} + +func (c *ManagerMock) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) { + args := c.Called(query) + return args.Get(0).(map[string]info.ContainerInfo), args.Error(1) +} + +func (c *ManagerMock) DockerContainer(name string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) { + args := c.Called(name, query) + return args.Get(0).(info.ContainerInfo), args.Error(1) +} + +func (c *ManagerMock) GetContainerSpec(containerName string) (info.ContainerSpec, error) { + args := c.Called(containerName) + return args.Get(0).(info.ContainerSpec), args.Error(1) +} + +func (c *ManagerMock) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) { + args := c.Called(containerName) + return args.Get(0).(v2.DerivedStats), args.Error(1) +} + +func (c *ManagerMock) WatchForEvents(queryuest *events.Request, passedChannel chan *events.Event) error { + args := c.Called(queryuest, passedChannel) + return args.Error(0) +} + +func (c *ManagerMock) GetPastEvents(queryuest *events.Request) (events.EventSlice, error) { + args := c.Called(queryuest) + return args.Get(0).(events.EventSlice), args.Error(1) +} + +func (c *ManagerMock) GetMachineInfo() (*info.MachineInfo, error) { + args := c.Called() + return args.Get(0).(*info.MachineInfo), args.Error(1) +} + +func (c *ManagerMock) GetVersionInfo() (*info.VersionInfo, error) { + args := c.Called() + return args.Get(0).(*info.VersionInfo), args.Error(1) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go new file mode 100644 index 00000000000..d24ae611b20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go @@ -0,0 +1,211 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Per-container manager. 
+ +package manager + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/docker" + info "github.com/google/cadvisor/info/v1" + itest "github.com/google/cadvisor/info/v1/test" + "github.com/google/cadvisor/storage/memory" + "github.com/google/cadvisor/utils/sysfs/fakesysfs" +) + +// TODO(vmarmol): Refactor these tests. + +func createManagerAndAddContainers( + memoryStorage *memory.InMemoryStorage, + sysfs *fakesysfs.FakeSysFs, + containers []string, + f func(*container.MockContainerHandler), + t *testing.T, +) *manager { + container.ClearContainerHandlerFactories() + mif := &manager{ + containers: make(map[namespacedContainerName]*containerData), + quitChannels: make([]chan error, 0, 2), + memoryStorage: memoryStorage, + } + for _, name := range containers { + mockHandler := container.NewMockContainerHandler(name) + spec := itest.GenerateRandomContainerSpec(4) + mockHandler.On("GetSpec").Return( + spec, + nil, + ).Once() + cont, err := newContainerData(name, memoryStorage, mockHandler, nil, false) + if err != nil { + t.Fatal(err) + } + mif.containers[namespacedContainerName{ + Name: name, + }] = cont + // Add Docker containers under their namespace. + if strings.HasPrefix(name, "/docker") { + mif.containers[namespacedContainerName{ + Namespace: docker.DockerNamespace, + Name: strings.TrimPrefix(name, "/docker/"), + }] = cont + } + f(mockHandler) + } + return mif +} + +// Expect a manager with the specified containers and query. Returns the manager, map of ContainerInfo objects, +// and map of MockContainerHandler objects.} +func expectManagerWithContainers(containers []string, query *info.ContainerInfoRequest, t *testing.T) (*manager, map[string]*info.ContainerInfo, map[string]*container.MockContainerHandler) { + infosMap := make(map[string]*info.ContainerInfo, len(containers)) + handlerMap := make(map[string]*container.MockContainerHandler, len(containers)) + + for _, container := range containers { + infosMap[container] = itest.GenerateRandomContainerInfo(container, 4, query, 1*time.Second) + } + + memoryStorage := memory.New(query.NumStats, nil) + sysfs := &fakesysfs.FakeSysFs{} + m := createManagerAndAddContainers( + memoryStorage, + sysfs, + containers, + func(h *container.MockContainerHandler) { + cinfo := infosMap[h.Name] + ref, err := h.ContainerReference() + if err != nil { + t.Error(err) + } + for _, stat := range cinfo.Stats { + err = memoryStorage.AddStats(ref, stat) + if err != nil { + t.Error(err) + } + } + spec := cinfo.Spec + + h.On("ListContainers", container.ListSelf).Return( + []info.ContainerReference(nil), + nil, + ) + h.On("GetSpec").Return( + spec, + nil, + ) + handlerMap[h.Name] = h + }, + t, + ) + + return m, infosMap, handlerMap +} + +func TestGetContainerInfo(t *testing.T) { + containers := []string{ + "/c1", + "/c2", + } + + query := &info.ContainerInfoRequest{ + NumStats: 256, + } + + m, infosMap, handlerMap := expectManagerWithContainers(containers, query, t) + + returnedInfos := make(map[string]*info.ContainerInfo, len(containers)) + + for _, container := range containers { + cinfo, err := m.GetContainerInfo(container, query) + if err != nil { + t.Fatalf("Unable to get info for container %v: %v", container, err) + } + returnedInfos[container] = cinfo + } + + for container, handler := range handlerMap { + handler.AssertExpectations(t) + returned := returnedInfos[container] + expected := infosMap[container] + if !reflect.DeepEqual(returned, expected) { + t.Errorf("returned 
unexpected info for container %v; returned %+v; expected %+v", container, returned, expected) + } + } + +} + +func TestSubcontainersInfo(t *testing.T) { + containers := []string{ + "/c1", + "/c2", + } + + query := &info.ContainerInfoRequest{ + NumStats: 64, + } + + m, _, _ := expectManagerWithContainers(containers, query, t) + + result, err := m.SubcontainersInfo("/", query) + if err != nil { + t.Fatalf("expected to succeed: %s", err) + } + if len(result) != len(containers) { + t.Errorf("expected to received containers: %v, but received: %v", containers, result) + } + for _, res := range result { + found := false + for _, name := range containers { + if res.Name == name { + found = true + break + } + } + if !found { + t.Errorf("unexpected container %q in result, expected one of %v", res.Name, containers) + } + } +} + +func TestDockerContainersInfo(t *testing.T) { + containers := []string{ + "/docker/c1", + } + + query := &info.ContainerInfoRequest{ + NumStats: 2, + } + + m, _, _ := expectManagerWithContainers(containers, query, t) + + result, err := m.DockerContainer("c1", query) + if err != nil { + t.Fatalf("expected to succeed: %s", err) + } + if result.Name != containers[0] { + t.Errorf("Unexpected container %q in result. Expected container %q", result.Name, containers[0]) + } +} + +func TestNewNilManager(t *testing.T) { + _, err := New(nil, nil) + if err == nil { + t.Fatalf("Expected nil manager to return error") + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/testdata/cpuinfo b/Godeps/_workspace/src/github.com/google/cadvisor/manager/testdata/cpuinfo new file mode 100644 index 00000000000..ca2b722a560 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/testdata/cpuinfo @@ -0,0 +1,251 @@ +processor : 0 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 0 +cpu cores : 6 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 1 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 1 +cpu cores : 6 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 2 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 2 +cpu cores : 6 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 3 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 3 +cpu cores : 6 +apicid : 16 +initial apicid : 16 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 4 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 4 +cpu cores : 6 +apicid : 18 +initial apicid : 18 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size 
: 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 5 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 5 +cpu cores : 6 +apicid : 20 +initial apicid : 20 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 6 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 2661.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 0 +cpu cores : 6 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 7 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 2661.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 1 +cpu cores : 6 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 8 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 0 +siblings : 6 +core id : 2 +cpu cores : 6 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 9 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 2661.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 3 +cpu cores : 6 +apicid : 17 +initial apicid : 17 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + +processor : 10 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 1596.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 4 +cpu cores : 6 +apicid : 19 +initial apicid : 19 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +processor : 11 +cpu family : 6 +stepping : 2 +microcode : 0x10 +cpu MHz : 2661.000 +cache size : 12288 KB +physical id : 1 +siblings : 6 +core id : 5 +cpu cores : 6 +apicid : 21 +initial apicid : 21 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +bogomips : 5333.60 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual + diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go new file mode 100644 index 00000000000..dbae6df3c83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go @@ -0,0 +1,117 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "io/ioutil" + "reflect" + "testing" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils/sysfs" + "github.com/google/cadvisor/utils/sysfs/fakesysfs" +) + +func TestTopology(t *testing.T) { + testfile := "./testdata/cpuinfo" + testcpuinfo, err := ioutil.ReadFile(testfile) + if err != nil { + t.Fatalf("unable to read input test file %s", testfile) + } + sysFs := &fakesysfs.FakeSysFs{} + c := sysfs.CacheInfo{ + Size: 32 * 1024, + Type: "unified", + Level: 1, + Cpus: 2, + } + sysFs.SetCacheInfo(c) + topology, numCores, err := getTopology(sysFs, string(testcpuinfo)) + if err != nil { + t.Errorf("failed to get topology for sample cpuinfo %s", string(testcpuinfo)) + } + + if numCores != 12 { + t.Errorf("Expected 12 cores, found %d", numCores) + } + expected_topology := []info.Node{} + numNodes := 2 + numCoresPerNode := 3 + numThreads := 2 + cache := info.Cache{ + Size: 32 * 1024, + Type: "unified", + Level: 1, + } + for i := 0; i < numNodes; i++ { + node := info.Node{Id: i} + // Copy over Memory from result. TODO(rjnagal): Use memory from fake. + node.Memory = topology[i].Memory + for j := 0; j < numCoresPerNode; j++ { + core := info.Core{Id: i*numCoresPerNode + j} + core.Caches = append(core.Caches, cache) + for k := 0; k < numThreads; k++ { + core.Threads = append(core.Threads, k*numCoresPerNode*numNodes+core.Id) + } + node.Cores = append(node.Cores, core) + } + expected_topology = append(expected_topology, node) + } + + if !reflect.DeepEqual(topology, expected_topology) { + t.Errorf("Expected topology %+v, got %+v", expected_topology, topology) + } +} + +func TestTopologyWithSimpleCpuinfo(t *testing.T) { + sysFs := &fakesysfs.FakeSysFs{} + c := sysfs.CacheInfo{ + Size: 32 * 1024, + Type: "unified", + Level: 1, + Cpus: 1, + } + sysFs.SetCacheInfo(c) + topology, numCores, err := getTopology(sysFs, "processor\t: 0\n") + if err != nil { + t.Errorf("Expected cpuinfo with no topology data to succeed.") + } + node := info.Node{Id: 0} + core := info.Core{Id: 0} + core.Threads = append(core.Threads, 0) + cache := info.Cache{ + Size: 32 * 1024, + Type: "unified", + Level: 1, + } + core.Caches = append(core.Caches, cache) + node.Cores = append(node.Cores, core) + // Copy over Memory from result. TODO(rjnagal): Use memory from fake. + node.Memory = topology[0].Memory + expected := []info.Node{node} + if !reflect.DeepEqual(topology, expected) { + t.Errorf("Expected topology %+v, got %+v", expected, topology) + } + if numCores != 1 { + t.Errorf("Expected 1 core, found %d", numCores) + } +} + +func TestTopologyEmptyCpuinfo(t *testing.T) { + _, _, err := getTopology(&fakesysfs.FakeSysFs{}, "") + if err == nil { + t.Errorf("Expected empty cpuinfo to fail.") + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/README.md b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/README.md new file mode 100644 index 00000000000..eefbe285256 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/README.md @@ -0,0 +1,29 @@ +BigQuery Storage Driver +======= + +[EXPERIMENTAL] Support for BigQuery backend as cAdvisor storage driver. +The current implementation takes bunch of BigQuery specific flags for authentication. +These will be merged into a single backend config. + +To run the current version, following flags need to be specified: +``` + # Storage driver to use. 
+ -storage_driver=bigquery + + # Information about server-to-server Oauth token. + # These can be obtained by creating a Service Account client id under `Google Developer API` + + # service client id + -bq_id="XYZ.apps.googleusercontent.com" + + # service email address + -bq_account="ABC@developer.gserviceaccount.com" + + # path to pem key (converted from p12 file) + -bq_credentials_file="/path/to/key.pem" + + # project id to use for storing datasets. + -bq_project_id="awesome_project" +``` + +See [Service account Authentication](https://developers.google.com/accounts/docs/OAuth2) for Oauth related details. diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/bigquery.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/bigquery.go new file mode 100644 index 00000000000..fe4edf74c01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/bigquery.go @@ -0,0 +1,465 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "strconv" + "time" + + bigquery "code.google.com/p/google-api-go-client/bigquery/v2" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/storage" + "github.com/google/cadvisor/storage/bigquery/client" +) + +type bigqueryStorage struct { + client *client.Client + machineName string +} + +const ( + // Bigquery schema types + typeTimestamp string = "TIMESTAMP" + typeString string = "STRING" + typeInteger string = "INTEGER" + + colTimestamp string = "timestamp" + colMachineName string = "machine" + colContainerName string = "container_name" + colCpuCumulativeUsage string = "cpu_cumulative_usage" + // Cumulative Cpu usage in system and user mode + colCpuCumulativeUsageSystem string = "cpu_cumulative_usage_system" + colCpuCumulativeUsageUser string = "cpu_cumulative_usage_user" + // Memory usage + colMemoryUsage string = "memory_usage" + // Working set size + colMemoryWorkingSet string = "memory_working_set" + // Container page fault + colMemoryContainerPgfault string = "memory_container_pgfault" + // Constainer major page fault + colMemoryContainerPgmajfault string = "memory_container_pgmajfault" + // Hierarchical page fault + colMemoryHierarchicalPgfault string = "memory_hierarchical_pgfault" + // Hierarchical major page fault + colMemoryHierarchicalPgmajfault string = "memory_hierarchical_pgmajfault" + // Cumulative count of bytes received. + colRxBytes string = "rx_bytes" + // Cumulative count of receive errors encountered. + colRxErrors string = "rx_errors" + // Cumulative count of bytes transmitted. + colTxBytes string = "tx_bytes" + // Cumulative count of transmit errors encountered. + colTxErrors string = "tx_errors" + // Filesystem device. + colFsDevice = "fs_device" + // Filesystem limit. + colFsLimit = "fs_limit" + // Filesystem available space. + colFsUsage = "fs_usage" +) + +// TODO(jnagal): Infer schema through reflection. 
(See bigquery/client/example) +func (self *bigqueryStorage) GetSchema() *bigquery.TableSchema { + fields := make([]*bigquery.TableFieldSchema, 18) + i := 0 + fields[i] = &bigquery.TableFieldSchema{ + Type: typeTimestamp, + Name: colTimestamp, + Mode: "REQUIRED", + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeString, + Name: colMachineName, + Mode: "REQUIRED", + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeString, + Name: colContainerName, + Mode: "REQUIRED", + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colCpuCumulativeUsage, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colCpuCumulativeUsageSystem, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colCpuCumulativeUsageUser, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryUsage, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryWorkingSet, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryContainerPgfault, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryContainerPgmajfault, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryHierarchicalPgfault, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colMemoryHierarchicalPgmajfault, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colRxBytes, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colRxErrors, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colTxBytes, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colTxErrors, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeString, + Name: colFsDevice, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colFsLimit, + } + i++ + fields[i] = &bigquery.TableFieldSchema{ + Type: typeInteger, + Name: colFsUsage, + } + return &bigquery.TableSchema{ + Fields: fields, + } +} + +func (self *bigqueryStorage) containerStatsToRows( + ref info.ContainerReference, + stats *info.ContainerStats, +) (row map[string]interface{}) { + row = make(map[string]interface{}) + + // Timestamp + row[colTimestamp] = stats.Timestamp + + // Machine name + row[colMachineName] = self.machineName + + // Container name + name := ref.Name + if len(ref.Aliases) > 0 { + name = ref.Aliases[0] + } + row[colContainerName] = name + + // Cumulative Cpu Usage + row[colCpuCumulativeUsage] = stats.Cpu.Usage.Total + + // Cumulative Cpu Usage in system mode + row[colCpuCumulativeUsageSystem] = stats.Cpu.Usage.System + + // Cumulative Cpu Usage in user mode + row[colCpuCumulativeUsageUser] = stats.Cpu.Usage.User + + // Memory Usage + row[colMemoryUsage] = stats.Memory.Usage + + // Working set size + row[colMemoryWorkingSet] = stats.Memory.WorkingSet + + // container page fault + row[colMemoryContainerPgfault] = stats.Memory.ContainerData.Pgfault + + // container major page fault + row[colMemoryContainerPgmajfault] = stats.Memory.ContainerData.Pgmajfault + + // hierarchical page fault + row[colMemoryHierarchicalPgfault] = stats.Memory.HierarchicalData.Pgfault + + // hierarchical major page fault + row[colMemoryHierarchicalPgmajfault] = stats.Memory.HierarchicalData.Pgmajfault + + // Network stats. 
+ row[colRxBytes] = stats.Network.RxBytes + row[colRxErrors] = stats.Network.RxErrors + row[colTxBytes] = stats.Network.TxBytes + row[colTxErrors] = stats.Network.TxErrors + + // TODO(jnagal): Handle per-cpu stats. + + return +} + +func (self *bigqueryStorage) containerFilesystemStatsToRows( + ref info.ContainerReference, + stats *info.ContainerStats, +) (rows []map[string]interface{}) { + for _, fsStat := range stats.Filesystem { + row := make(map[string]interface{}, 0) + row[colFsDevice] = fsStat.Device + row[colFsLimit] = fsStat.Limit + row[colFsUsage] = fsStat.Usage + rows = append(rows, row) + } + return rows +} + +func convertToUint64(v interface{}) (uint64, error) { + if v == nil { + return 0, nil + } + switch x := v.(type) { + case uint64: + return x, nil + case int: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case int32: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case int64: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case float64: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case uint32: + return uint64(x), nil + case string: + return strconv.ParseUint(x, 10, 64) + } + + return 0, fmt.Errorf("unknown type") +} + +func (self *bigqueryStorage) valuesToContainerStats(columns []string, values []interface{}) (*info.ContainerStats, error) { + stats := &info.ContainerStats{ + Filesystem: make([]info.FsStats, 0), + } + var err error + for i, col := range columns { + v := values[i] + switch { + case col == colTimestamp: + if t, ok := v.(time.Time); ok { + stats.Timestamp = t + } + case col == colMachineName: + if m, ok := v.(string); ok { + if m != self.machineName { + return nil, fmt.Errorf("different machine") + } + } else { + return nil, fmt.Errorf("machine name field is not a string: %v", v) + } + // Cumulative Cpu Usage + case col == colCpuCumulativeUsage: + stats.Cpu.Usage.Total, err = convertToUint64(v) + // Cumulative Cpu used by the system + case col == colCpuCumulativeUsageSystem: + stats.Cpu.Usage.System, err = convertToUint64(v) + // Cumulative Cpu Usage in user mode + case col == colCpuCumulativeUsageUser: + stats.Cpu.Usage.User, err = convertToUint64(v) + // Memory Usage + case col == colMemoryUsage: + stats.Memory.Usage, err = convertToUint64(v) + // Working set size + case col == colMemoryWorkingSet: + stats.Memory.WorkingSet, err = convertToUint64(v) + // container page fault + case col == colMemoryContainerPgfault: + stats.Memory.ContainerData.Pgfault, err = convertToUint64(v) + // container major page fault + case col == colMemoryContainerPgmajfault: + stats.Memory.ContainerData.Pgmajfault, err = convertToUint64(v) + // hierarchical page fault + case col == colMemoryHierarchicalPgfault: + stats.Memory.HierarchicalData.Pgfault, err = convertToUint64(v) + // hierarchical major page fault + case col == colMemoryHierarchicalPgmajfault: + stats.Memory.HierarchicalData.Pgmajfault, err = convertToUint64(v) + case col == colRxBytes: + stats.Network.RxBytes, err = convertToUint64(v) + case col == colRxErrors: + stats.Network.RxErrors, err = convertToUint64(v) + case col == colTxBytes: + stats.Network.TxBytes, err = convertToUint64(v) + case col == colTxErrors: + stats.Network.TxErrors, err = convertToUint64(v) + case col == colFsDevice: + device, ok := v.(string) + if !ok { + return nil, fmt.Errorf("filesystem name field is not a string: %+v", v) + } + if len(stats.Filesystem) == 0 { + 
stats.Filesystem = append(stats.Filesystem, info.FsStats{Device: device}) + } else { + stats.Filesystem[0].Device = device + } + case col == colFsLimit: + limit, err := convertToUint64(v) + if err != nil { + return nil, fmt.Errorf("filesystem limit field %+v invalid: %s", v, err) + } + if len(stats.Filesystem) == 0 { + stats.Filesystem = append(stats.Filesystem, info.FsStats{Limit: limit}) + } else { + stats.Filesystem[0].Limit = limit + } + case col == colFsUsage: + usage, err := convertToUint64(v) + if err != nil { + return nil, fmt.Errorf("filesystem usage field %+v invalid: %s", v, err) + } + if len(stats.Filesystem) == 0 { + stats.Filesystem = append(stats.Filesystem, info.FsStats{Usage: usage}) + } else { + stats.Filesystem[0].Usage = usage + } + } + if err != nil { + return nil, fmt.Errorf("column %v has invalid value %v: %v", col, v, err) + } + } + return stats, nil +} + +func (self *bigqueryStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + if stats == nil { + return nil + } + rows := make([]map[string]interface{}, 0) + rows = append(rows, self.containerStatsToRows(ref, stats)) + rows = append(rows, self.containerFilesystemStatsToRows(ref, stats)...) + for _, row := range rows { + err := self.client.InsertRow(row) + if err != nil { + return err + } + } + return nil +} + +func (self *bigqueryStorage) getRecentRows(containerName string, numRows int) ([]string, [][]interface{}, error) { + tableName, err := self.client.GetTableName() + if err != nil { + return nil, nil, err + } + + query := fmt.Sprintf("SELECT * FROM %v WHERE %v='%v' and %v='%v'", tableName, colContainerName, containerName, colMachineName, self.machineName) + if numRows > 0 { + query = fmt.Sprintf("%v LIMIT %v", query, numRows) + } + + return self.client.Query(query) +} + +func (self *bigqueryStorage) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) { + if numStats == 0 { + return nil, nil + } + header, rows, err := self.getRecentRows(containerName, numStats) + if err != nil { + return nil, err + } + statsList := make([]*info.ContainerStats, 0, len(rows)) + for _, row := range rows { + stats, err := self.valuesToContainerStats(header, row) + if err != nil { + return nil, err + } + if stats == nil { + continue + } + statsList = append(statsList, stats) + } + return statsList, nil +} + +func (self *bigqueryStorage) Close() error { + self.client.Close() + self.client = nil + return nil +} + +// Create a new bigquery storage driver. +// machineName: A unique identifier to identify the host that current cAdvisor +// instance is running on. +// tableName: BigQuery table used for storing stats. +func New(machineName, + datasetId, + tableName string, +) (storage.StorageDriver, error) { + bqClient, err := client.NewClient() + if err != nil { + return nil, err + } + err = bqClient.CreateDataset(datasetId) + if err != nil { + return nil, err + } + + ret := &bigqueryStorage{ + client: bqClient, + machineName: machineName, + } + schema := ret.GetSchema() + err = bqClient.CreateTable(tableName, schema) + if err != nil { + return nil, err + } + return ret, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/client.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/client.go new file mode 100644 index 00000000000..8ead5000965 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/client.go @@ -0,0 +1,283 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "flag" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "code.google.com/p/goauth2/oauth" + "code.google.com/p/goauth2/oauth/jwt" + bigquery "code.google.com/p/google-api-go-client/bigquery/v2" +) + +var ( + // TODO(jnagal): Condense all flags to an identity file and a pem key file. + clientId = flag.String("bq_id", "", "Client ID") + clientSecret = flag.String("bq_secret", "notasecret", "Client Secret") + projectId = flag.String("bq_project_id", "", "Bigquery project ID") + serviceAccount = flag.String("bq_account", "", "Service account email") + pemFile = flag.String("bq_credentials_file", "", "Credential Key file (pem)") +) + +const ( + errAlreadyExists string = "Error 409: Already Exists" + queryLimit int64 = 200 +) + +type Client struct { + service *bigquery.Service + token *oauth.Token + datasetId string + tableId string +} + +// Helper method to create an authenticated connection. +func connect() (*oauth.Token, *bigquery.Service, error) { + if *clientId == "" { + return nil, nil, fmt.Errorf("no client id specified") + } + if *serviceAccount == "" { + return nil, nil, fmt.Errorf("no service account specified") + } + if *projectId == "" { + return nil, nil, fmt.Errorf("no project id specified") + } + authScope := bigquery.BigqueryScope + if *pemFile == "" { + return nil, nil, fmt.Errorf("no credentials specified") + } + pemBytes, err := ioutil.ReadFile(*pemFile) + if err != nil { + return nil, nil, fmt.Errorf("could not access credential file %v - %v", pemFile, err) + } + + t := jwt.NewToken(*serviceAccount, authScope, pemBytes) + token, err := t.Assert(&http.Client{}) + if err != nil { + fmt.Printf("Invalid token: %v\n", err) + return nil, nil, err + } + config := &oauth.Config{ + ClientId: *clientId, + ClientSecret: *clientSecret, + Scope: authScope, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + transport := &oauth.Transport{ + Token: token, + Config: config, + } + client := transport.Client() + + service, err := bigquery.New(client) + if err != nil { + fmt.Printf("Failed to create new service: %v\n", err) + return nil, nil, err + } + + return token, service, nil +} + +// Creates a new client instance with an authenticated connection to bigquery. +func NewClient() (*Client, error) { + token, service, err := connect() + if err != nil { + return nil, err + } + c := &Client{ + token: token, + service: service, + } + return c, nil +} + +func (c *Client) Close() error { + c.service = nil + return nil +} + +// Helper method to return the bigquery service connection. +// Expired connection is refreshed. +func (c *Client) getService() (*bigquery.Service, error) { + if c.token == nil || c.service == nil { + return nil, fmt.Errorf("service not initialized") + } + + // Refresh expired token. 
+ if c.token.Expired() { + token, service, err := connect() + if err != nil { + return nil, err + } + c.token = token + c.service = service + return service, nil + } + return c.service, nil +} + +func (c *Client) PrintDatasets() error { + datasetList, err := c.service.Datasets.List(*projectId).Do() + if err != nil { + fmt.Printf("Failed to get list of datasets\n") + return err + } else { + fmt.Printf("Successfully retrieved datasets. Retrieved: %d\n", len(datasetList.Datasets)) + } + + for _, d := range datasetList.Datasets { + fmt.Printf("%s %s\n", d.Id, d.FriendlyName) + } + return nil +} + +func (c *Client) CreateDataset(datasetId string) error { + if c.service == nil { + return fmt.Errorf("no service created") + } + _, err := c.service.Datasets.Insert(*projectId, &bigquery.Dataset{ + DatasetReference: &bigquery.DatasetReference{ + DatasetId: datasetId, + ProjectId: *projectId, + }, + }).Do() + // TODO(jnagal): Do a Get() to verify dataset already exists. + if err != nil && !strings.Contains(err.Error(), errAlreadyExists) { + return err + } + c.datasetId = datasetId + return nil +} + +// Create a table with provided table ID and schema. +// Schema is currently not updated if the table already exists. +func (c *Client) CreateTable(tableId string, schema *bigquery.TableSchema) error { + if c.service == nil || c.datasetId == "" { + return fmt.Errorf("no dataset created") + } + _, err := c.service.Tables.Get(*projectId, c.datasetId, tableId).Do() + if err != nil { + // Create a new table. + _, err := c.service.Tables.Insert(*projectId, c.datasetId, &bigquery.Table{ + Schema: schema, + TableReference: &bigquery.TableReference{ + DatasetId: c.datasetId, + ProjectId: *projectId, + TableId: tableId, + }, + }).Do() + if err != nil { + return err + } + } + // TODO(jnagal): Update schema if it has changed. We can only extend existing schema. + c.tableId = tableId + return nil +} + +// Add a row to the connected table. +func (c *Client) InsertRow(rowData map[string]interface{}) error { + service, _ := c.getService() + if service == nil || c.datasetId == "" || c.tableId == "" { + return fmt.Errorf("table not setup to add rows") + } + jsonRows := make(map[string]bigquery.JsonValue) + for key, value := range rowData { + jsonRows[key] = bigquery.JsonValue(value) + } + rows := []*bigquery.TableDataInsertAllRequestRows{ + { + Json: jsonRows, + }, + } + + // TODO(jnagal): Batch insert requests. + insertRequest := &bigquery.TableDataInsertAllRequest{Rows: rows} + + result, err := service.Tabledata.InsertAll(*projectId, c.datasetId, c.tableId, insertRequest).Do() + if err != nil { + return fmt.Errorf("error inserting row: %v", err) + } + + if len(result.InsertErrors) > 0 { + errstr := fmt.Sprintf("Insertion for %d rows failed\n", len(result.InsertErrors)) + for _, errors := range result.InsertErrors { + for _, errorproto := range errors.Errors { + errstr += fmt.Sprintf("Error inserting row %d: %+v\n", errors.Index, errorproto) + } + } + return fmt.Errorf(errstr) + } + return nil +} + +// Returns a bigtable table name (format: datasetID.tableID) +func (c *Client) GetTableName() (string, error) { + if c.service == nil || c.datasetId == "" || c.tableId == "" { + return "", fmt.Errorf("table not setup") + } + return fmt.Sprintf("%s.%s", c.datasetId, c.tableId), nil +} + +// Do a synchronous query on bigtable and return a header and data rows. +// Number of rows are capped to queryLimit. 
+func (c *Client) Query(query string) ([]string, [][]interface{}, error) { + service, err := c.getService() + if err != nil { + return nil, nil, err + } + datasetRef := &bigquery.DatasetReference{ + DatasetId: c.datasetId, + ProjectId: *projectId, + } + + queryRequest := &bigquery.QueryRequest{ + DefaultDataset: datasetRef, + MaxResults: queryLimit, + Kind: "json", + Query: query, + } + + results, err := service.Jobs.Query(*projectId, queryRequest).Do() + if err != nil { + return nil, nil, err + } + numRows := results.TotalRows + if numRows < 1 { + return nil, nil, fmt.Errorf("query returned no data") + } + + headers := []string{} + for _, col := range results.Schema.Fields { + headers = append(headers, col.Name) + } + + rows := [][]interface{}{} + numColumns := len(results.Schema.Fields) + for _, data := range results.Rows { + row := make([]interface{}, numColumns) + for c := 0; c < numColumns; c++ { + row[c] = data.F[c].V + } + rows = append(rows, row) + } + return headers, rows, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/example/example.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/example/example.go new file mode 100644 index 00000000000..08eaffd30ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/bigquery/client/example/example.go @@ -0,0 +1,108 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "fmt" + "time" + + "github.com/SeanDolphin/bqschema" + "github.com/google/cadvisor/storage/bigquery/client" +) + +type container struct { + Name string `json:"name"` + CpuUsage uint64 `json:"cpuusage,omitempty"` + MemoryUsage uint64 `json:"memoryusage,omitempty"` + NetworkUsage uint64 `json:"networkusage,omitempty"` + Timestamp time.Time `json:"timestamp"` +} + +func main() { + flag.Parse() + c, err := client.NewClient() + if err != nil { + fmt.Printf("Failed to connect to bigquery\n") + panic(err) + } + + c.PrintDatasets() + + // Create a new dataset. 
+ err = c.CreateDataset("sampledataset") + if err != nil { + fmt.Printf("Failed to create dataset %v\n", err) + panic(err) + } + + // Create a new table + containerData := container{ + Name: "test_container", + CpuUsage: 123456, + MemoryUsage: 1024, + NetworkUsage: 9046, + Timestamp: time.Now(), + } + schema, err := bqschema.ToSchema(containerData) + if err != nil { + fmt.Printf("Failed to create schema") + panic(err) + } + + err = c.CreateTable("sampletable", schema) + if err != nil { + fmt.Printf("Failed to create table") + panic(err) + } + + // Add Data + m := make(map[string]interface{}) + t := time.Now() + for i := 0; i < 10; i++ { + m["Name"] = containerData.Name + m["CpuUsage"] = containerData.CpuUsage + uint64(i*100) + m["MemoryUsage"] = containerData.MemoryUsage - uint64(i*10) + m["NetworkUsage"] = containerData.NetworkUsage + uint64(i*10) + m["Timestamp"] = t.Add(time.Duration(i) * time.Second) + + err = c.InsertRow(m) + if err != nil { + fmt.Printf("Failed to insert row") + panic(err) + } + } + + // Query + tableName, err := c.GetTableName() + if err != nil { + fmt.Printf("table not set") + panic(err) + } + + query := "SELECT * FROM " + tableName + " ORDER BY Timestamp LIMIT 100" + header, rows, err := c.Query(query) + if err != nil { + fmt.Printf("Failed query") + panic(err) + } + fmt.Printf("Headers: %v", header) + for _, row := range rows { + for i, val := range row { + fmt.Printf("%s:%v ", header[i], val) + } + fmt.Printf("\n") + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go new file mode 100644 index 00000000000..00aafcd87fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go @@ -0,0 +1,375 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package influxdb + +import ( + "fmt" + "sync" + "time" + + info "github.com/google/cadvisor/info/v1" + influxdb "github.com/influxdb/influxdb/client" +) + +type influxdbStorage struct { + client *influxdb.Client + machineName string + tableName string + bufferDuration time.Duration + lastWrite time.Time + series []*influxdb.Series + lock sync.Mutex + readyToFlush func() bool +} + +const ( + colTimestamp string = "time" + colMachineName string = "machine" + colContainerName string = "container_name" + colCpuCumulativeUsage string = "cpu_cumulative_usage" + // Memory Usage + colMemoryUsage string = "memory_usage" + // Working set size + colMemoryWorkingSet string = "memory_working_set" + // Cumulative count of bytes received. + colRxBytes string = "rx_bytes" + // Cumulative count of receive errors encountered. + colRxErrors string = "rx_errors" + // Cumulative count of bytes transmitted. + colTxBytes string = "tx_bytes" + // Cumulative count of transmit errors encountered. + colTxErrors string = "tx_errors" + // Filesystem device. + colFsDevice = "fs_device" + // Filesystem limit. 
+ colFsLimit = "fs_limit" + // Filesystem usage. + colFsUsage = "fs_usage" +) + +func (self *influxdbStorage) getSeriesDefaultValues( + ref info.ContainerReference, + stats *info.ContainerStats, + columns *[]string, + values *[]interface{}) { + // Timestamp + *columns = append(*columns, colTimestamp) + *values = append(*values, stats.Timestamp.UnixNano()/1E3) + + // Machine name + *columns = append(*columns, colMachineName) + *values = append(*values, self.machineName) + + // Container name + *columns = append(*columns, colContainerName) + if len(ref.Aliases) > 0 { + *values = append(*values, ref.Aliases[0]) + } else { + *values = append(*values, ref.Name) + } +} + +// In order to maintain a fixed column format, we add a new series for each filesystem partition. +func (self *influxdbStorage) containerFilesystemStatsToSeries( + ref info.ContainerReference, + stats *info.ContainerStats) (series []*influxdb.Series) { + if len(stats.Filesystem) == 0 { + return series + } + for _, fsStat := range stats.Filesystem { + columns := make([]string, 0) + values := make([]interface{}, 0) + self.getSeriesDefaultValues(ref, stats, &columns, &values) + + columns = append(columns, colFsDevice) + values = append(values, fsStat.Device) + + columns = append(columns, colFsLimit) + values = append(values, fsStat.Limit) + + columns = append(columns, colFsUsage) + values = append(values, fsStat.Usage) + series = append(series, self.newSeries(columns, values)) + } + return series +} + +func (self *influxdbStorage) containerStatsToValues( + ref info.ContainerReference, + stats *info.ContainerStats, +) (columns []string, values []interface{}) { + self.getSeriesDefaultValues(ref, stats, &columns, &values) + // Cumulative Cpu Usage + columns = append(columns, colCpuCumulativeUsage) + values = append(values, stats.Cpu.Usage.Total) + + // Memory Usage + columns = append(columns, colMemoryUsage) + values = append(values, stats.Memory.Usage) + + // Working set size + columns = append(columns, colMemoryWorkingSet) + values = append(values, stats.Memory.WorkingSet) + + // Network stats. 
+ columns = append(columns, colRxBytes) + values = append(values, stats.Network.RxBytes) + + columns = append(columns, colRxErrors) + values = append(values, stats.Network.RxErrors) + + columns = append(columns, colTxBytes) + values = append(values, stats.Network.TxBytes) + + columns = append(columns, colTxErrors) + values = append(values, stats.Network.TxErrors) + + return columns, values +} + +func convertToUint64(v interface{}) (uint64, error) { + if v == nil { + return 0, nil + } + switch x := v.(type) { + case uint64: + return x, nil + case int: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case int32: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case int64: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case float64: + if x < 0 { + return 0, fmt.Errorf("negative value: %v", x) + } + return uint64(x), nil + case uint32: + return uint64(x), nil + } + return 0, fmt.Errorf("unknown type") +} + +func (self *influxdbStorage) valuesToContainerStats(columns []string, values []interface{}) (*info.ContainerStats, error) { + stats := &info.ContainerStats{ + Filesystem: make([]info.FsStats, 0), + } + var err error + for i, col := range columns { + v := values[i] + switch { + case col == colTimestamp: + if f64sec, ok := v.(float64); ok && stats.Timestamp.IsZero() { + stats.Timestamp = time.Unix(int64(f64sec)/1E3, (int64(f64sec)%1E3)*1E6) + } + case col == colMachineName: + if m, ok := v.(string); ok { + if m != self.machineName { + return nil, fmt.Errorf("different machine") + } + } else { + return nil, fmt.Errorf("machine name field is not a string: %v", v) + } + // Cumulative Cpu Usage + case col == colCpuCumulativeUsage: + stats.Cpu.Usage.Total, err = convertToUint64(v) + // Memory Usage + case col == colMemoryUsage: + stats.Memory.Usage, err = convertToUint64(v) + // Working set size + case col == colMemoryWorkingSet: + stats.Memory.WorkingSet, err = convertToUint64(v) + case col == colRxBytes: + stats.Network.RxBytes, err = convertToUint64(v) + case col == colRxErrors: + stats.Network.RxErrors, err = convertToUint64(v) + case col == colTxBytes: + stats.Network.TxBytes, err = convertToUint64(v) + case col == colTxErrors: + stats.Network.TxErrors, err = convertToUint64(v) + case col == colFsDevice: + device, ok := v.(string) + if !ok { + return nil, fmt.Errorf("filesystem name field is not a string: %+v", v) + } + if len(stats.Filesystem) == 0 { + stats.Filesystem = append(stats.Filesystem, info.FsStats{Device: device}) + } else { + stats.Filesystem[0].Device = device + } + case col == colFsLimit: + limit, err := convertToUint64(v) + if err != nil { + return nil, fmt.Errorf("filesystem limit field %+v invalid: %s", v, err) + } + if len(stats.Filesystem) == 0 { + stats.Filesystem = append(stats.Filesystem, info.FsStats{Limit: limit}) + } else { + stats.Filesystem[0].Limit = limit + } + case col == colFsUsage: + usage, err := convertToUint64(v) + if err != nil { + return nil, fmt.Errorf("filesystem usage field %+v invalid: %s", v, err) + } + if len(stats.Filesystem) == 0 { + stats.Filesystem = append(stats.Filesystem, info.FsStats{Usage: usage}) + } else { + stats.Filesystem[0].Usage = usage + } + } + if err != nil { + return nil, fmt.Errorf("column %v has invalid value %v: %v", col, v, err) + } + } + return stats, nil +} + +func (self *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) { + self.readyToFlush = readyToFlush +} + +func (self 
*influxdbStorage) defaultReadyToFlush() bool { + return time.Since(self.lastWrite) >= self.bufferDuration +} + +func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + if stats == nil { + return nil + } + var seriesToFlush []*influxdb.Series + func() { + // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write. + self.lock.Lock() + defer self.lock.Unlock() + + self.series = append(self.series, self.newSeries(self.containerStatsToValues(ref, stats))) + self.series = append(self.series, self.containerFilesystemStatsToSeries(ref, stats)...) + if self.readyToFlush() { + seriesToFlush = self.series + self.series = make([]*influxdb.Series, 0) + self.lastWrite = time.Now() + } + }() + if len(seriesToFlush) > 0 { + err := self.client.WriteSeriesWithTimePrecision(seriesToFlush, influxdb.Microsecond) + if err != nil { + return fmt.Errorf("failed to write stats to influxDb - %s", err) + } + } + + return nil +} + +func (self *influxdbStorage) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) { + if numStats == 0 { + return nil, nil + } + // TODO(dengnan): select only columns that we need + // TODO(dengnan): escape names + query := fmt.Sprintf("select * from %v where %v='%v' and %v='%v'", self.tableName, colContainerName, containerName, colMachineName, self.machineName) + if numStats > 0 { + query = fmt.Sprintf("%v limit %v", query, numStats) + } + series, err := self.client.Query(query) + if err != nil { + return nil, err + } + statsList := make([]*info.ContainerStats, 0, len(series)) + // By default, influxDB returns data in time descending order. + // RecentStats() requires stats in time increasing order, + // so we need to go through from the last one to the first one. + for i := len(series) - 1; i >= 0; i-- { + s := series[i] + + for j := len(s.Points) - 1; j >= 0; j-- { + values := s.Points[j] + stats, err := self.valuesToContainerStats(s.Columns, values) + if err != nil { + return nil, err + } + if stats == nil { + continue + } + statsList = append(statsList, stats) + } + } + return statsList, nil +} + +func (self *influxdbStorage) Close() error { + self.client = nil + return nil +} + +// Returns a new influxdb series. +func (self *influxdbStorage) newSeries(columns []string, points []interface{}) *influxdb.Series { + out := &influxdb.Series{ + Name: self.tableName, + Columns: columns, + // There's only one point for each stats + Points: make([][]interface{}, 1), + } + out.Points[0] = points + return out +} + +// machineName: A unique identifier to identify the host that current cAdvisor +// instance is running on. +// influxdbHost: The host which runs influxdb. +func New(machineName, + tablename, + database, + username, + password, + influxdbHost string, + isSecure bool, + bufferDuration time.Duration, +) (*influxdbStorage, error) { + config := &influxdb.ClientConfig{ + Host: influxdbHost, + Username: username, + Password: password, + Database: database, + IsSecure: isSecure, + } + client, err := influxdb.NewClient(config) + if err != nil { + return nil, err + } + // TODO(monnand): With go 1.3, we cannot compress data now. 
+ client.DisableCompression() + + ret := &influxdbStorage{ + client: client, + machineName: machineName, + tableName: tablename, + bufferDuration: bufferDuration, + lastWrite: time.Now(), + series: make([]*influxdb.Series, 0), + } + ret.readyToFlush = ret.defaultReadyToFlush + return ret, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb_test.go new file mode 100644 index 00000000000..63a7dd4b506 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb_test.go @@ -0,0 +1,187 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build influxdb_test +// To run unit test: go test -tags influxdb_test + +package influxdb + +import ( + "fmt" + "reflect" + "testing" + "time" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/storage" + "github.com/google/cadvisor/storage/test" + influxdb "github.com/influxdb/influxdb/client" +) + +// The duration in seconds for which stats will be buffered in the influxdb driver. +const kCacheDuration = 1 + +type influxDbTestStorageDriver struct { + count int + buffer int + base storage.StorageDriver +} + +func (self *influxDbTestStorageDriver) readyToFlush() bool { + if self.count >= self.buffer { + return true + } + return false +} + +func (self *influxDbTestStorageDriver) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + self.count++ + return self.base.AddStats(ref, stats) +} + +func (self *influxDbTestStorageDriver) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) { + return self.base.RecentStats(containerName, numStats) +} + +func (self *influxDbTestStorageDriver) Percentiles(containerName string, cpuUsagePercentiles []int, memUsagePercentiles []int) (*info.ContainerStatsPercentiles, error) { + return self.base.Percentiles(containerName, cpuUsagePercentiles, memUsagePercentiles) +} + +func (self *influxDbTestStorageDriver) Samples(containerName string, numSamples int) ([]*info.ContainerStatsSample, error) { + return self.base.Samples(containerName, numSamples) +} + +func (self *influxDbTestStorageDriver) Close() error { + return self.base.Close() +} + +func (self *influxDbTestStorageDriver) StatsEq(a, b *info.ContainerStats) bool { + if !test.TimeEq(a.Timestamp, b.Timestamp, 10*time.Millisecond) { + return false + } + // Check only the stats populated in influxdb. 
+ if a.Cpu.Usage.Total != b.Cpu.Usage.Total { + return false + } + + if a.Memory.Usage != b.Memory.Usage { + return false + } + + if a.Memory.WorkingSet != b.Memory.WorkingSet { + return false + } + + if !reflect.DeepEqual(a.Network, b.Network) { + return false + } + + if !reflect.DeepEqual(a.Filesystem, b.Filesystem) { + return false + } + return true +} + +func runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bufferCount int) { + machineName := "machineA" + tablename := "t" + database := "cadvisor" + username := "root" + password := "root" + hostname := "localhost:8086" + percentilesDuration := 10 * time.Minute + rootConfig := &influxdb.ClientConfig{ + Host: hostname, + Username: username, + Password: password, + IsSecure: false, + } + rootClient, err := influxdb.NewClient(rootConfig) + if err != nil { + t.Fatal(err) + } + // create the data base first. + rootClient.CreateDatabase(database) + config := &influxdb.ClientConfig{ + Host: hostname, + Username: username, + Password: password, + Database: database, + IsSecure: false, + } + client, err := influxdb.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.DisableCompression() + deleteAll := fmt.Sprintf("drop series %v", tablename) + _, err = client.Query(deleteAll) + if err != nil { + t.Fatal(err) + } + // delete all data by the end of the call + defer client.Query(deleteAll) + + driver, err := New(machineName, + tablename, + database, + username, + password, + hostname, + false, + time.Duration(bufferCount), + percentilesDuration) + if err != nil { + t.Fatal(err) + } + testDriver := &influxDbTestStorageDriver{buffer: bufferCount} + driver.OverrideReadyToFlush(testDriver.readyToFlush) + testDriver.base = driver + + // generate another container's data on same machine. + test.StorageDriverFillRandomStatsFunc("containerOnSameMachine", 100, testDriver, t) + + // generate another container's data on another machine. + driverForAnotherMachine, err := New("machineB", + tablename, + database, + username, + password, + hostname, + false, + time.Duration(bufferCount), + percentilesDuration) + if err != nil { + t.Fatal(err) + } + defer driverForAnotherMachine.Close() + testDriverOtherMachine := &influxDbTestStorageDriver{buffer: bufferCount} + driverForAnotherMachine.OverrideReadyToFlush(testDriverOtherMachine.readyToFlush) + testDriverOtherMachine.base = driverForAnotherMachine + + test.StorageDriverFillRandomStatsFunc("containerOnAnotherMachine", 100, testDriverOtherMachine, t) + f(testDriver, t) +} + +func TestRetrievePartialRecentStats(t *testing.T) { + runStorageTest(test.StorageDriverTestRetrievePartialRecentStats, t, 20) +} + +func TestRetrieveAllRecentStats(t *testing.T) { + runStorageTest(test.StorageDriverTestRetrieveAllRecentStats, t, 10) +} + +func TestNoRecentStats(t *testing.T) { + runStorageTest(test.StorageDriverTestNoRecentStats, t, kCacheDuration) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory.go new file mode 100644 index 00000000000..2d5b14a309a --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory.go @@ -0,0 +1,125 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import ( + "fmt" + "sync" + "time" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/storage" +) + +// TODO(vmarmol): See about refactoring this class, we have an unecessary redirection of containerStorage and InMemoryStorage. +// containerStorage is used to store per-container information +type containerStorage struct { + ref info.ContainerReference + recentStats *StatsBuffer + maxNumStats int + lock sync.RWMutex +} + +func (self *containerStorage) AddStats(stats *info.ContainerStats) error { + self.lock.Lock() + defer self.lock.Unlock() + + // Add the stat to storage. + self.recentStats.Add(stats) + return nil +} + +func (self *containerStorage) RecentStats(start, end time.Time, maxStats int) ([]*info.ContainerStats, error) { + self.lock.RLock() + defer self.lock.RUnlock() + return self.recentStats.InTimeRange(start, end, maxStats), nil +} + +func newContainerStore(ref info.ContainerReference, maxNumStats int) *containerStorage { + return &containerStorage{ + ref: ref, + recentStats: NewStatsBuffer(maxNumStats), + maxNumStats: maxNumStats, + } +} + +type InMemoryStorage struct { + lock sync.RWMutex + containerStorageMap map[string]*containerStorage + maxNumStats int + backend storage.StorageDriver +} + +func (self *InMemoryStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + var cstore *containerStorage + var ok bool + + func() { + self.lock.Lock() + defer self.lock.Unlock() + if cstore, ok = self.containerStorageMap[ref.Name]; !ok { + cstore = newContainerStore(ref, self.maxNumStats) + self.containerStorageMap[ref.Name] = cstore + } + }() + + if self.backend != nil { + // TODO(monnand): To deal with long delay write operations, we + // may want to start a pool of goroutines to do write + // operations. 
+ if err := self.backend.AddStats(ref, stats); err != nil { + glog.Error(err) + } + } + return cstore.AddStats(stats) +} + +func (self *InMemoryStorage) RecentStats(name string, start, end time.Time, maxStats int) ([]*info.ContainerStats, error) { + var cstore *containerStorage + var ok bool + err := func() error { + self.lock.RLock() + defer self.lock.RUnlock() + if cstore, ok = self.containerStorageMap[name]; !ok { + return fmt.Errorf("unable to find data for container %v", name) + } + return nil + }() + if err != nil { + return nil, err + } + + return cstore.RecentStats(start, end, maxStats) +} + +func (self *InMemoryStorage) Close() error { + self.lock.Lock() + self.containerStorageMap = make(map[string]*containerStorage, 32) + self.lock.Unlock() + return nil +} + +func New( + maxNumStats int, + backend storage.StorageDriver, +) *InMemoryStorage { + ret := &InMemoryStorage{ + containerStorageMap: make(map[string]*containerStorage, 32), + maxNumStats: maxNumStats, + backend: backend, + } + return ret +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory_test.go new file mode 100644 index 00000000000..2b2059f85f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/memory_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import ( + "testing" + "time" + + info "github.com/google/cadvisor/info/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const containerName = "/container" + +var ( + containerRef = info.ContainerReference{Name: containerName} + zero time.Time +) + +// Make stats with the specified identifier. +func makeStat(i int) *info.ContainerStats { + return &info.ContainerStats{ + Timestamp: zero.Add(time.Duration(i) * time.Second), + Cpu: info.CpuStats{ + LoadAverage: int32(i), + }, + } +} + +func getRecentStats(t *testing.T, memoryStorage *InMemoryStorage, numStats int) []*info.ContainerStats { + stats, err := memoryStorage.RecentStats(containerName, zero, zero, numStats) + require.Nil(t, err) + return stats +} + +func TestAddStats(t *testing.T) { + memoryStorage := New(60, nil) + + assert := assert.New(t) + assert.Nil(memoryStorage.AddStats(containerRef, makeStat(0))) + assert.Nil(memoryStorage.AddStats(containerRef, makeStat(1))) + assert.Nil(memoryStorage.AddStats(containerRef, makeStat(2))) + assert.Nil(memoryStorage.AddStats(containerRef, makeStat(0))) + containerRef2 := info.ContainerReference{ + Name: "/container2", + } + assert.Nil(memoryStorage.AddStats(containerRef2, makeStat(0))) + assert.Nil(memoryStorage.AddStats(containerRef2, makeStat(1))) +} + +func TestRecentStatsNoRecentStats(t *testing.T) { + memoryStorage := makeWithStats(0) + + _, err := memoryStorage.RecentStats(containerName, zero, zero, 60) + assert.NotNil(t, err) +} + +// Make an instance of InMemoryStorage with n stats. 
+func makeWithStats(n int) *InMemoryStorage { + memoryStorage := New(60, nil) + + for i := 0; i < n; i++ { + memoryStorage.AddStats(containerRef, makeStat(i)) + } + return memoryStorage +} + +func TestRecentStatsGetZeroStats(t *testing.T) { + memoryStorage := makeWithStats(10) + + assert.Len(t, getRecentStats(t, memoryStorage, 0), 0) +} + +func TestRecentStatsGetSomeStats(t *testing.T) { + memoryStorage := makeWithStats(10) + + assert.Len(t, getRecentStats(t, memoryStorage, 5), 5) +} + +func TestRecentStatsGetAllStats(t *testing.T) { + memoryStorage := makeWithStats(10) + + assert.Len(t, getRecentStats(t, memoryStorage, -1), 10) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer.go new file mode 100644 index 00000000000..a1ed74581f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer.go @@ -0,0 +1,144 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import ( + "sort" + "time" + + info "github.com/google/cadvisor/info/v1" +) + +// A circular buffer for ContainerStats. +type StatsBuffer struct { + buffer []info.ContainerStats + size int + index int +} + +// Returns a new thread-compatible StatsBuffer. +func NewStatsBuffer(size int) *StatsBuffer { + return &StatsBuffer{ + buffer: make([]info.ContainerStats, size), + size: 0, + index: size - 1, + } +} + +// Adds an element to the start of the buffer (removing one from the end if necessary). +func (self *StatsBuffer) Add(item *info.ContainerStats) { + if self.size < len(self.buffer) { + self.size++ + } + self.index = (self.index + 1) % len(self.buffer) + self.buffer[self.index] = *item +} + +// Returns up to maxResult elements in the specified time period (inclusive). +// Results are from first to last. maxResults of -1 means no limit. +func (self *StatsBuffer) InTimeRange(start, end time.Time, maxResults int) []*info.ContainerStats { + // No stats, return empty. + if self.size == 0 { + return []*info.ContainerStats{} + } + + // NOTE: Since we store the elments in descending timestamp order "start" will + // be a higher index than "end". + + var startIndex int + if start.IsZero() { + // None specified, start at the beginning. + startIndex = self.size - 1 + } else { + // Start is the index before the elements smaller than it. We do this by + // finding the first element smaller than start and taking the index + // before that element + startIndex = sort.Search(self.size, func(index int) bool { + // buffer[index] < start + return self.Get(index).Timestamp.Before(start) + }) - 1 + // Check if start is after all the data we have. + if startIndex < 0 { + return []*info.ContainerStats{} + } + } + + var endIndex int + if end.IsZero() { + // None specified, end with the latest stats. + endIndex = 0 + } else { + // End is the first index smaller than or equal to it (so, not larger). 
+ endIndex = sort.Search(self.size, func(index int) bool { + // buffer[index] <= t -> !(buffer[index] > t) + return !self.Get(index).Timestamp.After(end) + }) + // Check if end is before all the data we have. + if endIndex == self.size { + return []*info.ContainerStats{} + } + } + + // Trim to maxResults size. + numResults := startIndex - endIndex + 1 + if maxResults != -1 && numResults > maxResults { + startIndex -= numResults - maxResults + numResults = maxResults + } + + // Return in sorted timestamp order so from the "back" to "front". + result := make([]*info.ContainerStats, numResults) + for i := 0; i < numResults; i++ { + result[i] = self.Get(startIndex - i) + } + return result +} + +// TODO(vmarmol): Remove this function as it will no longer be neededt. +// Returns the first N elements in the buffer. If N > size of buffer, size of buffer elements are returned. +// Returns the elements in ascending timestamp order. +func (self *StatsBuffer) FirstN(n int) []*info.ContainerStats { + // Cap n at the number of elements we have. + if n > self.size { + n = self.size + } + + // index points to the latest element, get n before that one (keeping in mind we may have gone through 0). + start := self.index - (n - 1) + if start < 0 { + start += len(self.buffer) + } + + // Copy the elements. + res := make([]*info.ContainerStats, n) + for i := 0; i < n; i++ { + index := (start + i) % len(self.buffer) + res[i] = &self.buffer[index] + } + return res +} + +// Gets the element at the specified index. Note that elements are stored in LIFO order. +func (self *StatsBuffer) Get(index int) *info.ContainerStats { + calculatedIndex := self.index - index + if calculatedIndex < 0 { + calculatedIndex += len(self.buffer) + } + return &self.buffer[calculatedIndex] +} + +func (self *StatsBuffer) Size() int { + return self.size +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer_test.go new file mode 100644 index 00000000000..4ac28e63fe7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/memory/stats_buffer_test.go @@ -0,0 +1,206 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
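As a usage note on the `StatsBuffer` added above: it retains only the newest `size` samples, zero `time.Time` bounds in `InTimeRange` act as open bounds, and results come back in ascending timestamp order. A small illustrative sketch, not part of the vendored sources:

```go
package main

import (
	"fmt"
	"time"

	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/storage/memory"
)

func main() {
	buf := memory.NewStatsBuffer(3) // keeps at most the 3 newest samples
	var zero time.Time
	for i := 0; i < 5; i++ {
		buf.Add(&info.ContainerStats{Timestamp: zero.Add(time.Duration(i) * time.Second)})
	}
	// The two oldest samples were overwritten; zero start/end means no bound.
	for _, s := range buf.InTimeRange(zero, zero, -1) {
		fmt.Println(s.Timestamp) // offsets +2s, +3s, +4s, oldest first
	}
}
```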
+ +package memory + +import ( + "strconv" + "strings" + "testing" + "time" + + info "github.com/google/cadvisor/info/v1" + "github.com/stretchr/testify/assert" +) + +func createTime(id int) time.Time { + var zero time.Time + return zero.Add(time.Duration(id+1) * time.Second) +} + +func createStats(id int32) *info.ContainerStats { + return &info.ContainerStats{ + Timestamp: createTime(int(id)), + Cpu: info.CpuStats{ + LoadAverage: id, + }, + } +} + +func expectSize(t *testing.T, sb *StatsBuffer, expectedSize int) { + if sb.Size() != expectedSize { + t.Errorf("Expected size %v, got %v", expectedSize, sb.Size()) + } +} + +func expectFirstN(t *testing.T, sb *StatsBuffer, expected []int32) { + expectElements(t, sb.FirstN(sb.Size()), expected) +} + +func expectElements(t *testing.T, actual []*info.ContainerStats, expected []int32) { + if len(actual) != len(expected) { + t.Errorf("Expected elements %v, got %v", expected, actual) + return + } + for i, el := range actual { + if el.Cpu.LoadAverage != expected[i] { + actualElements := make([]string, len(actual)) + for i, element := range actual { + actualElements[i] = strconv.Itoa(int(element.Cpu.LoadAverage)) + } + t.Errorf("Expected elements %v, got %v", expected, strings.Join(actualElements, ",")) + return + } + } +} + +func expectElement(t *testing.T, stat *info.ContainerStats, expected int32) { + if stat.Cpu.LoadAverage != expected { + t.Errorf("Expected %d, but received %d", expected, stat.Cpu.LoadAverage) + } +} + +func TestAddAndFirstN(t *testing.T) { + sb := NewStatsBuffer(5) + + // Add 1. + sb.Add(createStats(1)) + expectSize(t, sb, 1) + expectFirstN(t, sb, []int32{1}) + + // Fill the buffer. + for i := 1; i <= 5; i++ { + expectSize(t, sb, i) + sb.Add(createStats(int32(i))) + } + expectSize(t, sb, 5) + expectFirstN(t, sb, []int32{1, 2, 3, 4, 5}) + + // Add more than is available in the buffer + sb.Add(createStats(6)) + expectSize(t, sb, 5) + expectFirstN(t, sb, []int32{2, 3, 4, 5, 6}) + + // Replace all elements. + for i := 7; i <= 10; i++ { + sb.Add(createStats(int32(i))) + } + expectSize(t, sb, 5) + expectFirstN(t, sb, []int32{6, 7, 8, 9, 10}) +} + +func TestGet(t *testing.T) { + sb := NewStatsBuffer(5) + sb.Add(createStats(1)) + sb.Add(createStats(2)) + sb.Add(createStats(3)) + expectSize(t, sb, 3) + expectFirstN(t, sb, []int32{1, 2, 3}) + + expectElement(t, sb.Get(0), 3) + expectElement(t, sb.Get(1), 2) + expectElement(t, sb.Get(2), 1) +} + +func TestInTimeRange(t *testing.T) { + sb := NewStatsBuffer(5) + assert := assert.New(t) + + var empty time.Time + + // No elements. + assert.Empty(sb.InTimeRange(createTime(0), createTime(5), 10)) + assert.Empty(sb.InTimeRange(createTime(0), empty, 10)) + assert.Empty(sb.InTimeRange(empty, createTime(5), 10)) + assert.Empty(sb.InTimeRange(empty, empty, 10)) + + // One element. + sb.Add(createStats(1)) + expectSize(t, sb, 1) + expectElements(t, sb.InTimeRange(createTime(0), createTime(5), 10), []int32{1}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(5), 10), []int32{1}) + expectElements(t, sb.InTimeRange(createTime(0), createTime(1), 10), []int32{1}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(1), 10), []int32{1}) + assert.Empty(sb.InTimeRange(createTime(2), createTime(5), 10)) + + // Two element. 
+ sb.Add(createStats(2)) + expectSize(t, sb, 2) + expectElements(t, sb.InTimeRange(createTime(0), createTime(5), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(5), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(0), createTime(2), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(2), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(1), 10), []int32{1}) + expectElements(t, sb.InTimeRange(createTime(2), createTime(2), 10), []int32{2}) + assert.Empty(sb.InTimeRange(createTime(3), createTime(5), 10)) + + // Many elements. + sb.Add(createStats(3)) + sb.Add(createStats(4)) + expectSize(t, sb, 4) + expectElements(t, sb.InTimeRange(createTime(0), createTime(5), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(0), createTime(5), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(5), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(0), createTime(4), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(4), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(0), createTime(2), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(1), createTime(2), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(createTime(2), createTime(3), 10), []int32{2, 3}) + expectElements(t, sb.InTimeRange(createTime(3), createTime(4), 10), []int32{3, 4}) + expectElements(t, sb.InTimeRange(createTime(3), createTime(5), 10), []int32{3, 4}) + assert.Empty(sb.InTimeRange(createTime(5), createTime(5), 10)) + + // No start time. + expectElements(t, sb.InTimeRange(empty, createTime(5), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(empty, createTime(4), 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(empty, createTime(3), 10), []int32{1, 2, 3}) + expectElements(t, sb.InTimeRange(empty, createTime(2), 10), []int32{1, 2}) + expectElements(t, sb.InTimeRange(empty, createTime(1), 10), []int32{1}) + + // No end time. + expectElements(t, sb.InTimeRange(createTime(0), empty, 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(1), empty, 10), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(2), empty, 10), []int32{2, 3, 4}) + expectElements(t, sb.InTimeRange(createTime(3), empty, 10), []int32{3, 4}) + expectElements(t, sb.InTimeRange(createTime(4), empty, 10), []int32{4}) + + // No start or end time. + expectElements(t, sb.InTimeRange(empty, empty, 10), []int32{1, 2, 3, 4}) + + // Start after data. + assert.Empty(sb.InTimeRange(createTime(5), createTime(5), 10)) + assert.Empty(sb.InTimeRange(createTime(5), empty, 10)) + + // End before data. + assert.Empty(sb.InTimeRange(createTime(0), createTime(0), 10)) + assert.Empty(sb.InTimeRange(empty, createTime(0), 10)) +} + +func TestInTimeRangeWithLimit(t *testing.T) { + sb := NewStatsBuffer(5) + sb.Add(createStats(1)) + sb.Add(createStats(2)) + sb.Add(createStats(3)) + sb.Add(createStats(4)) + expectSize(t, sb, 4) + + var empty time.Time + + // Limit cuts off from latest timestamp. 
+ expectElements(t, sb.InTimeRange(empty, empty, 4), []int32{1, 2, 3, 4}) + expectElements(t, sb.InTimeRange(empty, empty, 3), []int32{2, 3, 4}) + expectElements(t, sb.InTimeRange(empty, empty, 2), []int32{3, 4}) + expectElements(t, sb.InTimeRange(empty, empty, 1), []int32{4}) + assert.Empty(t, sb.InTimeRange(empty, empty, 0)) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/storage.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/storage.go new file mode 100644 index 00000000000..3404b9a14e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/storage.go @@ -0,0 +1,33 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import info "github.com/google/cadvisor/info/v1" + +type StorageDriver interface { + AddStats(ref info.ContainerReference, stats *info.ContainerStats) error + + // Read most recent stats. numStats indicates max number of stats + // returned. The returned stats must be consecutive observed stats. If + // numStats < 0, then return all stats stored in the storage. The + // returned stats should be sorted in time increasing order, i.e. Most + // recent stats should be the last. + RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) + + // Close will clear the state of the storage driver. The elements + // stored in the underlying storage may or may not be deleted depending + // on the implementation of the storage driver. + Close() error +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/mock.go new file mode 100644 index 00000000000..f29bbdfb903 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/mock.go @@ -0,0 +1,43 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + info "github.com/google/cadvisor/info/v1" + "github.com/stretchr/testify/mock" +) + +type MockStorageDriver struct { + mock.Mock + MockCloseMethod bool +} + +func (self *MockStorageDriver) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + args := self.Called(ref, stats) + return args.Error(0) +} + +func (self *MockStorageDriver) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) { + args := self.Called(containerName, numStats) + return args.Get(0).([]*info.ContainerStats), args.Error(1) +} + +func (self *MockStorageDriver) Close() error { + if self.MockCloseMethod { + args := self.Called() + return args.Error(0) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/storagetests.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/storagetests.go new file mode 100644 index 00000000000..bbaba0bba2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/test/storagetests.go @@ -0,0 +1,272 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "math/rand" + "reflect" + "testing" + "time" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/storage" +) + +type TestStorageDriver interface { + StatsEq(a *info.ContainerStats, b *info.ContainerStats) bool + storage.StorageDriver +} + +func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats { + if len(cpu) != len(mem) { + panic("len(cpu) != len(mem)") + } + + ret := make([]*info.ContainerStats, len(cpu)) + currentTime := time.Now() + + var cpuTotalUsage uint64 = 0 + for i, cpuUsage := range cpu { + cpuTotalUsage += cpuUsage + stats := new(info.ContainerStats) + stats.Timestamp = currentTime + currentTime = currentTime.Add(duration) + + stats.Cpu.Usage.Total = cpuTotalUsage + stats.Cpu.Usage.User = stats.Cpu.Usage.Total + stats.Cpu.Usage.System = 0 + stats.Cpu.Usage.PerCpu = []uint64{cpuTotalUsage} + + stats.Memory.Usage = mem[i] + + stats.Network.RxBytes = uint64(rand.Intn(10000)) + stats.Network.RxErrors = uint64(rand.Intn(1000)) + stats.Network.TxBytes = uint64(rand.Intn(100000)) + stats.Network.TxErrors = uint64(rand.Intn(1000)) + + stats.Filesystem = make([]info.FsStats, 1) + stats.Filesystem[0].Device = "/dev/sda1" + stats.Filesystem[0].Limit = 1024000000 + stats.Filesystem[0].Usage = 1024000 + ret[i] = stats + } + return ret +} + +func TimeEq(t1, t2 time.Time, tolerance time.Duration) bool { + // t1 should not be later than t2 + if t1.After(t2) { + t1, t2 = t2, t1 + } + diff := t2.Sub(t1) + if diff <= tolerance { + return true + } + return false +} + +func durationEq(a, b time.Duration, tolerance time.Duration) bool { + if a > b { + a, b = b, a + } + diff := a - b + if diff <= tolerance { + return true + } + return false +} + +const ( + // 10ms, i.e. 
0.01s + timePrecision time.Duration = 10 * time.Millisecond +) + +// This function is useful because we do not require precise time +// representation. +func DefaultStatsEq(a, b *info.ContainerStats) bool { + if !TimeEq(a.Timestamp, b.Timestamp, timePrecision) { + return false + } + if !reflect.DeepEqual(a.Cpu, b.Cpu) { + return false + } + if !reflect.DeepEqual(a.Memory, b.Memory) { + return false + } + if !reflect.DeepEqual(a.Network, b.Network) { + return false + } + if !reflect.DeepEqual(a.Filesystem, b.Filesystem) { + return false + } + + return true +} + +// This function will generate random stats and write +// them into the storage. The function will not close the driver +func StorageDriverFillRandomStatsFunc( + containerName string, + N int, + driver TestStorageDriver, + t *testing.T, +) { + cpuTrace := make([]uint64, 0, N) + memTrace := make([]uint64, 0, N) + + // We need N+1 observations to get N samples + for i := 0; i < N+1; i++ { + cpuTrace = append(cpuTrace, uint64(rand.Intn(1000))) + memTrace = append(memTrace, uint64(rand.Intn(1000))) + } + + samplePeriod := 1 * time.Second + + ref := info.ContainerReference{ + Name: containerName, + } + + trace := buildTrace(cpuTrace, memTrace, samplePeriod) + + for _, stats := range trace { + err := driver.AddStats(ref, stats) + if err != nil { + t.Fatalf("unable to add stats: %v", err) + } + } +} + +func StorageDriverTestRetrievePartialRecentStats(driver TestStorageDriver, t *testing.T) { + defer driver.Close() + N := 100 + memTrace := make([]uint64, N) + cpuTrace := make([]uint64, N) + for i := 0; i < N; i++ { + memTrace[i] = uint64(i + 1) + cpuTrace[i] = uint64(1) + } + + ref := info.ContainerReference{ + Name: "container", + } + + trace := buildTrace(cpuTrace, memTrace, 1*time.Second) + + for _, stats := range trace { + driver.AddStats(ref, stats) + } + + recentStats, err := driver.RecentStats(ref.Name, 10) + if err != nil { + t.Fatal(err) + } + if len(recentStats) == 0 { + t.Fatal("should at least store one stats") + } + + if len(recentStats) > 10 { + t.Fatalf("returned %v stats, not 10.", len(recentStats)) + } + + actualRecentStats := trace[len(trace)-len(recentStats):] + + // The returned stats should be sorted in time increasing order + for i, s := range actualRecentStats { + r := recentStats[i] + if !driver.StatsEq(s, r) { + t.Errorf("unexpected stats %+v with memory usage %v; should be %+v", r, r.Memory.Usage, s) + } + } +} + +func StorageDriverTestRetrieveAllRecentStats(driver TestStorageDriver, t *testing.T) { + defer driver.Close() + N := 100 + memTrace := make([]uint64, N) + cpuTrace := make([]uint64, N) + for i := 0; i < N; i++ { + memTrace[i] = uint64(i + 1) + cpuTrace[i] = uint64(1) + } + + ref := info.ContainerReference{ + Name: "container", + } + + trace := buildTrace(cpuTrace, memTrace, 1*time.Second) + + for _, stats := range trace { + driver.AddStats(ref, stats) + } + + recentStats, err := driver.RecentStats(ref.Name, -1) + if err != nil { + t.Fatal(err) + } + if len(recentStats) == 0 { + t.Fatal("should at least store one stats") + } + if len(recentStats) > N { + t.Fatalf("returned %v stats, not %d.", len(recentStats), N) + } + + actualRecentStats := trace[len(trace)-len(recentStats):] + + // The returned stats should be sorted in time increasing order + for i, s := range actualRecentStats { + r := recentStats[i] + if !driver.StatsEq(s, r) { + t.Errorf("unexpected stats %+v with memory usage %v", r, r.Memory.Usage) + } + } +} + +func StorageDriverTestNoRecentStats(driver TestStorageDriver, t *testing.T) { + defer 
driver.Close() + nonExistContainer := "somerandomecontainer" + stats, _ := driver.RecentStats(nonExistContainer, -1) + if len(stats) > 0 { + t.Errorf("RecentStats() returns %v stats on non exist container", len(stats)) + } +} + +func StorageDriverTestRetrieveZeroRecentStats(driver TestStorageDriver, t *testing.T) { + defer driver.Close() + N := 100 + memTrace := make([]uint64, N) + cpuTrace := make([]uint64, N) + for i := 0; i < N; i++ { + memTrace[i] = uint64(i + 1) + cpuTrace[i] = uint64(1) + } + + ref := info.ContainerReference{ + Name: "container", + } + + trace := buildTrace(cpuTrace, memTrace, 1*time.Second) + + for _, stats := range trace { + driver.AddStats(ref, stats) + } + + recentStats, err := driver.RecentStats(ref.Name, 0) + if err != nil { + t.Fatal(err) + } + if len(recentStats) > 0 { + t.Errorf("RecentStats() returns %v stats when requests for 0 stats", len(recentStats)) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer.go new file mode 100644 index 00000000000..fd1f87a5c14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer.go @@ -0,0 +1,74 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package summary + +import ( + info "github.com/google/cadvisor/info/v2" +) + +// Manages a buffer of usage samples. +// This is similar to stats buffer in storage/memory. +// The main difference is that we do not pre-allocate the buffer as most containers +// won't live that long. +type SamplesBuffer struct { + // list of collected samples. + samples []info.Usage + // maximum size this buffer can grow to. + maxSize int + // index for the latest sample. + index int +} + +// Initializes an empty buffer. +func NewSamplesBuffer(size int) *SamplesBuffer { + return &SamplesBuffer{ + index: -1, + maxSize: size, + } +} + +// Returns the current number of samples in the buffer. +func (s *SamplesBuffer) Size() int { + return len(s.samples) +} + +// Add an element to the buffer. Oldest one is overwritten if required. +func (s *SamplesBuffer) Add(stat info.Usage) { + if len(s.samples) < s.maxSize { + s.samples = append(s.samples, stat) + s.index++ + return + } + s.index = (s.index + 1) % s.maxSize + s.samples[s.index] = stat +} + +// Returns pointers to the last 'n' stats. 
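+// Results are ordered oldest to newest; if n exceeds the number of stored samples, all samples are returned.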
+func (s *SamplesBuffer) RecentStats(n int) []*info.Usage { + if n > len(s.samples) { + n = len(s.samples) + } + start := s.index - (n - 1) + if start < 0 { + start += len(s.samples) + } + + out := make([]*info.Usage, n) + for i := 0; i < n; i++ { + index := (start + i) % len(s.samples) + out[i] = &s.samples[index] + } + return out +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer_test.go new file mode 100644 index 00000000000..357ede7c0a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/buffer_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package summary + +import ( + "reflect" + "testing" + + info "github.com/google/cadvisor/info/v2" +) + +func createSample(i uint64) info.Usage { + usage := info.Usage{} + usage.PercentComplete = 100 + usage.Cpu = info.Percentiles{ + Present: true, + Mean: i * 50, + Max: i * 100, + Ninety: i * 90, + } + usage.Memory = info.Percentiles{ + Present: true, + Mean: i * 50 * 1024, + Max: i * 100 * 1024, + Ninety: i * 90 * 1024, + } + return usage +} + +func expectSize(t *testing.T, b *SamplesBuffer, expectedSize int) { + if b.Size() != expectedSize { + t.Errorf("Expected size %d, got %d", expectedSize, b.Size()) + } +} + +func expectElements(t *testing.T, b *SamplesBuffer, expected []info.Usage) { + + out := b.RecentStats(b.Size()) + if len(out) != len(expected) { + t.Errorf("Expected %d elements, got %d", len(expected), len(out)) + } + for i, el := range out { + if !reflect.DeepEqual(*el, expected[i]) { + t.Errorf("Expected elements %v, got %v", expected[i], *el) + } + } +} + +func TestEmpty(t *testing.T) { + b := NewSamplesBuffer(5) + expectSize(t, b, 0) + expectElements(t, b, []info.Usage{}) +} + +func TestAddSingleSample(t *testing.T) { + b := NewSamplesBuffer(5) + + sample := createSample(1) + b.Add(sample) + expectSize(t, b, 1) + expectElements(t, b, []info.Usage{sample}) +} + +func TestFullBuffer(t *testing.T) { + maxSize := 5 + b := NewSamplesBuffer(maxSize) + samples := []info.Usage{} + for i := 0; i < maxSize; i++ { + sample := createSample(uint64(i)) + samples = append(samples, sample) + b.Add(sample) + } + expectSize(t, b, maxSize) + expectElements(t, b, samples) +} + +func TestOverflow(t *testing.T) { + maxSize := 5 + overflow := 2 + b := NewSamplesBuffer(maxSize) + samples := []info.Usage{} + for i := 0; i < maxSize+overflow; i++ { + sample := createSample(uint64(i)) + if i >= overflow { + samples = append(samples, sample) + } + b.Add(sample) + } + expectSize(t, b, maxSize) + expectElements(t, b, samples) +} + +func TestReplaceAll(t *testing.T) { + maxSize := 5 + b := NewSamplesBuffer(maxSize) + samples := []info.Usage{} + for i := 0; i < maxSize*2; i++ { + sample := createSample(uint64(i)) + if i >= maxSize { + samples = append(samples, sample) + } + b.Add(sample) + } + expectSize(t, b, maxSize) + expectElements(t, b, samples) 
+} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go new file mode 100644 index 00000000000..1c7faa0bd4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go @@ -0,0 +1,192 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Utility methods to calculate percentiles. + +package summary + +import ( + "fmt" + "math" + "sort" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v2" +) + +const secondsToMilliSeconds = 1000 +const milliSecondsToNanoSeconds = 1000000 +const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds + +type uint64Slice []uint64 + +func (a uint64Slice) Len() int { return len(a) } +func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } + +// Get 90th percentile of the provided samples. Round to integer. +func (self uint64Slice) Get90Percentile() uint64 { + count := self.Len() + if count == 0 { + return 0 + } + sort.Sort(self) + n := float64(0.9 * (float64(count) + 1)) + idx, frac := math.Modf(n) + index := int(idx) + percentile := float64(self[index-1]) + if index > 1 && index < count { + percentile += frac * float64(self[index]-self[index-1]) + } + return uint64(percentile) +} + +type mean struct { + // current count. + count uint64 + // current mean. + Mean float64 +} + +func (self *mean) Add(value uint64) { + self.count++ + if self.count == 1 { + self.Mean = float64(value) + return + } + c := float64(self.count) + v := float64(value) + self.Mean = (self.Mean*(c-1) + v) / c +} + +type resource struct { + // list of samples being tracked. + samples uint64Slice + // average from existing samples. + mean mean + // maximum value seen so far in the added samples. + max uint64 +} + +// Adds a new percentile sample. +func (self *resource) Add(p info.Percentiles) { + if !p.Present { + return + } + if p.Max > self.max { + self.max = p.Max + } + self.mean.Add(p.Mean) + // Selecting 90p of 90p :( + self.samples = append(self.samples, p.Ninety) +} + +// Add a single sample. Internally, we convert it to a fake percentile sample. +func (self *resource) AddSample(val uint64) { + sample := info.Percentiles{ + Present: true, + Mean: val, + Max: val, + Ninety: val, + } + self.Add(sample) +} + +// Get max, average, and 90p from existing samples. +func (self *resource) GetPercentile() info.Percentiles { + p := info.Percentiles{} + p.Mean = uint64(self.mean.Mean) + p.Max = self.max + p.Ninety = self.samples.Get90Percentile() + p.Present = true + return p +} + +func NewResource(size int) *resource { + return &resource{ + samples: make(uint64Slice, 0, size), + mean: mean{count: 0, Mean: 0}, + } +} + +// Return aggregated percentiles from the provided percentile samples. 
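+// Max is the max of the sample maxes, Mean is the mean of the sample means, and Ninety is the
+// 90th percentile of the per-sample 90th percentiles.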
+func GetDerivedPercentiles(stats []*info.Usage) info.Usage { + cpu := NewResource(len(stats)) + memory := NewResource(len(stats)) + for _, stat := range stats { + cpu.Add(stat.Cpu) + memory.Add(stat.Memory) + } + usage := info.Usage{} + usage.Cpu = cpu.GetPercentile() + usage.Memory = memory.GetPercentile() + return usage +} + +// Calculate part of a minute this sample set represent. +func getPercentComplete(stats []*secondSample) (percent int32) { + numSamples := len(stats) + if numSamples > 1 { + percent = 100 + timeRange := stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds() + // allow some slack + if timeRange < 58*secondsToNanoSeconds { + percent = int32((timeRange * 100) / 60 * secondsToNanoSeconds) + } + } + return +} + +// Calculate cpurate from two consecutive total cpu usage samples. +func getCpuRate(latest, previous secondSample) (uint64, error) { + var elapsed int64 + elapsed = latest.Timestamp.Sub(previous.Timestamp).Nanoseconds() + if elapsed < 10*milliSecondsToNanoSeconds { + return 0, fmt.Errorf("elapsed time too small: %d ns: time now %s last %s", elapsed, latest.Timestamp.String(), previous.Timestamp.String()) + } + if latest.Cpu < previous.Cpu { + return 0, fmt.Errorf("bad sample: cumulative cpu usage dropped from %d to %d", latest.Cpu, previous.Cpu) + } + // Cpurate is calculated in cpu-milliseconds per second. + cpuRate := (latest.Cpu - previous.Cpu) * secondsToMilliSeconds / uint64(elapsed) + return cpuRate, nil +} + +// Returns a percentile sample for a minute by aggregating seconds samples. +func GetMinutePercentiles(stats []*secondSample) info.Usage { + lastSample := secondSample{} + cpu := NewResource(len(stats)) + memory := NewResource(len(stats)) + for _, stat := range stats { + if !lastSample.Timestamp.IsZero() { + cpuRate, err := getCpuRate(*stat, lastSample) + if err != nil { + glog.V(3).Infof("Skipping sample, %v", err) + continue + } + glog.V(3).Infof("Adding cpu rate sample : %d", cpuRate) + cpu.AddSample(cpuRate) + memory.AddSample(stat.Memory) + } else { + memory.AddSample(stat.Memory) + } + lastSample = *stat + } + percent := getPercentComplete(stats) + return info.Usage{ + PercentComplete: percent, + Cpu: cpu.GetPercentile(), + Memory: memory.GetPercentile(), + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go new file mode 100644 index 00000000000..53b6a29f2c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go @@ -0,0 +1,182 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package summary + +import ( + "testing" + "time" + + info "github.com/google/cadvisor/info/v2" +) + +const Nanosecond = 1000000000 + +func Test90Percentile(t *testing.T) { + N := 100 + stats := make(uint64Slice, 0, N) + for i := N; i > 0; i-- { + stats = append(stats, uint64(i)) + } + p := stats.Get90Percentile() + if p != 90 { + t.Errorf("90th percentile is %d, should be 90.", p) + } + // 90p should be between 94 and 95. Promoted to 95. + N = 105 + for i := 101; i <= N; i++ { + stats = append(stats, uint64(i)) + } + p = stats.Get90Percentile() + if p != 95 { + t.Errorf("90th percentile is %d, should be 95.", p) + } +} + +func TestMean(t *testing.T) { + var i, N uint64 + N = 100 + mean := mean{count: 0, Mean: 0} + for i = 1; i < N; i++ { + mean.Add(i) + } + if mean.Mean != 50.0 { + t.Errorf("Mean is %f, should be 50.0", mean.Mean) + } +} + +func TestAggregates(t *testing.T) { + N := uint64(100) + var i uint64 + ct := time.Now() + stats := make([]*secondSample, 0, N) + for i = 1; i < N; i++ { + s := &secondSample{ + Timestamp: ct.Add(time.Duration(i) * time.Second), + // cpu rate is 1 s/s + Cpu: i * Nanosecond, + // Memory grows by a KB every second. + Memory: i * 1024, + } + stats = append(stats, s) + } + usage := GetMinutePercentiles(stats) + // Cpu mean, max, and 90p should all be 1000 ms/s. + cpuExpected := info.Percentiles{ + Present: true, + Mean: 1000, + Max: 1000, + Ninety: 1000, + } + if usage.Cpu != cpuExpected { + t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) + } + memExpected := info.Percentiles{ + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Ninety: 90 * 1024, + } + if usage.Memory != memExpected { + t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) + } +} +func TestSamplesCloseInTimeIgnored(t *testing.T) { + N := uint64(100) + var i uint64 + ct := time.Now() + stats := make([]*secondSample, 0, N*2) + for i = 1; i < N; i++ { + s1 := &secondSample{ + Timestamp: ct.Add(time.Duration(i) * time.Second), + // cpu rate is 1 s/s + Cpu: i * Nanosecond, + // Memory grows by a KB every second. + Memory: i * 1024, + } + stats = append(stats, s1) + + // Add another dummy sample too close in time to the last one. + s2 := &secondSample{ + // Add extra millisecond. + Timestamp: ct.Add(time.Duration(i) * time.Second).Add(time.Duration(1) * time.Millisecond), + Cpu: i * 100 * Nanosecond, + Memory: i * 1024 * 1024, + } + stats = append(stats, s2) + } + usage := GetMinutePercentiles(stats) + // Cpu mean, max, and 90p should all be 1000 ms/s. All high-value samples are discarded. + cpuExpected := info.Percentiles{ + Present: true, + Mean: 1000, + Max: 1000, + Ninety: 1000, + } + if usage.Cpu != cpuExpected { + t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) + } + memExpected := info.Percentiles{ + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Ninety: 90 * 1024, + } + if usage.Memory != memExpected { + t.Errorf("memory stats are mean %+v. 
Expected %+v", usage.Memory, memExpected) + } +} + +func TestDerivedStats(t *testing.T) { + N := uint64(100) + var i uint64 + stats := make([]*info.Usage, 0, N) + for i = 1; i < N; i++ { + s := &info.Usage{ + PercentComplete: 100, + Cpu: info.Percentiles{ + Present: true, + Mean: i * Nanosecond, + Max: i * Nanosecond, + Ninety: i * Nanosecond, + }, + Memory: info.Percentiles{ + Present: true, + Mean: i * 1024, + Max: i * 1024, + Ninety: i * 1024, + }, + } + stats = append(stats, s) + } + usage := GetDerivedPercentiles(stats) + cpuExpected := info.Percentiles{ + Present: true, + Mean: 50 * Nanosecond, + Max: 99 * Nanosecond, + Ninety: 90 * Nanosecond, + } + if usage.Cpu != cpuExpected { + t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) + } + memExpected := info.Percentiles{ + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Ninety: 90 * 1024, + } + if usage.Memory != memExpected { + t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/summary.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/summary.go new file mode 100644 index 00000000000..4912b9b4083 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/summary.go @@ -0,0 +1,185 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Maintains the summary of aggregated minute, hour, and day stats. +// For a container running for more than a day, amount of tracked data can go up to +// 40 KB when cpu and memory are tracked. We'll start by enabling collection for the +// node, followed by docker, and then all containers as we understand the usage pattern +// better +// TODO(rjnagal): Optimize the size if we start running it for every container. +package summary + +import ( + "fmt" + "sync" + "time" + + "github.com/google/cadvisor/info/v1" + info "github.com/google/cadvisor/info/v2" +) + +// Usage fields we track for generating percentiles. +type secondSample struct { + Timestamp time.Time // time when the sample was recorded. + Cpu uint64 // cpu usage + Memory uint64 // memory usage +} + +type availableResources struct { + Cpu bool + Memory bool +} + +type StatsSummary struct { + // Resources being tracked for this container. + available availableResources + // list of second samples. The list is cleared when a new minute samples is generated. + secondSamples []*secondSample + // minute percentiles. We track 24 * 60 maximum samples. + minuteSamples *SamplesBuffer + // latest derived instant, minute, hour, and day stats. Instant sample updated every second. + // Others updated every minute. + derivedStats info.DerivedStats // Guarded by dataLock. + dataLock sync.RWMutex +} + +// Adds a new seconds sample. +// If enough seconds samples are collected, a minute sample is generated and derived +// stats are updated. 
+func (s *StatsSummary) AddSample(stat v1.ContainerStats) error { + sample := secondSample{} + sample.Timestamp = stat.Timestamp + if s.available.Cpu { + sample.Cpu = stat.Cpu.Usage.Total + } + if s.available.Memory { + sample.Memory = stat.Memory.WorkingSet + } + s.secondSamples = append(s.secondSamples, &sample) + s.updateLatestUsage() + // TODO(jnagal): Use 'available' to avoid unnecessary computation. + numSamples := len(s.secondSamples) + elapsed := time.Nanosecond + if numSamples > 1 { + start := s.secondSamples[0].Timestamp + end := s.secondSamples[numSamples-1].Timestamp + elapsed = end.Sub(start) + } + if elapsed > 60*time.Second { + // Make a minute sample. This works with dynamic housekeeping as long + // as we keep max dynamic houskeeping period close to a minute. + minuteSample := GetMinutePercentiles(s.secondSamples) + // Clear seconds samples. Keep the latest sample for continuity. + // Copying and resizing helps avoid slice re-allocation. + s.secondSamples[0] = s.secondSamples[numSamples-1] + s.secondSamples = s.secondSamples[:1] + s.minuteSamples.Add(minuteSample) + err := s.updateDerivedStats() + if err != nil { + return err + } + } + return nil +} + +func (s *StatsSummary) updateLatestUsage() { + usage := info.InstantUsage{} + numStats := len(s.secondSamples) + if numStats < 1 { + return + } + latest := s.secondSamples[numStats-1] + usage.Memory = latest.Memory + if numStats > 1 { + previous := s.secondSamples[numStats-2] + cpu, err := getCpuRate(*latest, *previous) + if err == nil { + usage.Cpu = cpu + } + } + + s.dataLock.Lock() + defer s.dataLock.Unlock() + s.derivedStats.LatestUsage = usage + s.derivedStats.Timestamp = latest.Timestamp + return +} + +// Generate new derived stats based on current minute stats samples. +func (s *StatsSummary) updateDerivedStats() error { + derived := info.DerivedStats{} + derived.Timestamp = time.Now() + minuteSamples := s.minuteSamples.RecentStats(1) + if len(minuteSamples) != 1 { + return fmt.Errorf("failed to retrieve minute stats") + } + derived.MinuteUsage = *minuteSamples[0] + hourUsage, err := s.getDerivedUsage(60) + if err != nil { + return fmt.Errorf("failed to compute hour stats: %v", err) + } + dayUsage, err := s.getDerivedUsage(60 * 24) + if err != nil { + return fmt.Errorf("failed to compute day usage: %v", err) + } + derived.HourUsage = hourUsage + derived.DayUsage = dayUsage + + s.dataLock.Lock() + defer s.dataLock.Unlock() + derived.LatestUsage = s.derivedStats.LatestUsage + s.derivedStats = derived + + return nil +} + +// helper method to get hour and daily derived stats +func (s *StatsSummary) getDerivedUsage(n int) (info.Usage, error) { + if n < 1 { + return info.Usage{}, fmt.Errorf("invalid number of samples requested: %d", n) + } + samples := s.minuteSamples.RecentStats(n) + numSamples := len(samples) + if numSamples < 1 { + return info.Usage{}, fmt.Errorf("failed to retrieve any minute stats.") + } + // We generate derived stats even with partial data. + usage := GetDerivedPercentiles(samples) + // Assumes we have equally placed minute samples. + usage.PercentComplete = int32(numSamples * 100 / n) + return usage, nil +} + +// Return the latest calculated derived stats. 
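+// Safe for concurrent use; reads of derivedStats are guarded by dataLock.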
+func (s *StatsSummary) DerivedStats() (info.DerivedStats, error) { + s.dataLock.RLock() + defer s.dataLock.RUnlock() + + return s.derivedStats, nil +} + +func New(spec v1.ContainerSpec) (*StatsSummary, error) { + summary := StatsSummary{} + if spec.HasCpu { + summary.available.Cpu = true + } + if spec.HasMemory { + summary.available.Memory = true + } + if !summary.available.Cpu && !summary.available.Memory { + return nil, fmt.Errorf("none of the resources are being tracked.") + } + summary.minuteSamples = NewSamplesBuffer(60 /* one hour */) + return &summary, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/cpuload.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/cpuload.go new file mode 100644 index 00000000000..f1a9ba054f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/cpuload.go @@ -0,0 +1,54 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cpuload + +import ( + "fmt" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils/cpuload/netlink" + "github.com/google/cadvisor/utils/cpuload/scheddebug" +) + +type CpuLoadReader interface { + // Start the reader. + Start() error + + // Stop the reader and clean up internal state. + Stop() + + // Retrieve Cpu load for a given group. + // name is the full hierarchical name of the container. + // Path is an absolute filesystem path for a container under CPU cgroup hierarchy. + GetCpuLoad(name string, path string) (info.LoadStats, error) +} + +func New() (CpuLoadReader, error) { + // First try to create a scheddebug based load reader. + schedReader, schedErr := scheddebug.New() + if schedErr == nil { + glog.Info("Using a sched debug based load reader") + return schedReader, nil + } + glog.V(1).Infof("failed to create a scheddebug-based cpu load reader: %v", schedErr) + // netlink gives us more data than scheddebug, but it doesn't work inside network namespaces. It also needs to be hierarchical. + reader, err := netlink.New() + if err != nil { + return nil, fmt.Errorf("failed to create any cpu load reader - netlink based (%v), scheddebug based (%v)", err, schedErr) + } + glog.Info("Using a netlink-based load reader") + return reader, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/conn.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/conn.go new file mode 100644 index 00000000000..7eb2204da3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/conn.go @@ -0,0 +1,95 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package netlink + +import ( + "bufio" + "bytes" + "encoding/binary" + "os" + "syscall" +) + +type Connection struct { + // netlink socket + fd int + // cache pid to use in every netlink request. + pid uint32 + // sequence number for netlink messages. + seq uint32 + addr syscall.SockaddrNetlink + rbuf *bufio.Reader +} + +// Create and bind a new netlink socket. +func newConnection() (*Connection, error) { + + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_DGRAM, syscall.NETLINK_GENERIC) + if err != nil { + return nil, err + } + + conn := new(Connection) + conn.fd = fd + conn.seq = 0 + conn.pid = uint32(os.Getpid()) + conn.addr.Family = syscall.AF_NETLINK + conn.rbuf = bufio.NewReader(conn) + err = syscall.Bind(fd, &conn.addr) + if err != nil { + syscall.Close(fd) + return nil, err + } + return conn, err +} + +func (self *Connection) Read(b []byte) (n int, err error) { + n, _, err = syscall.Recvfrom(self.fd, b, 0) + return n, err +} + +func (self *Connection) Write(b []byte) (n int, err error) { + err = syscall.Sendto(self.fd, b, 0, &self.addr) + return len(b), err +} + +func (self *Connection) Close() error { + return syscall.Close(self.fd) +} + +func (self *Connection) WriteMessage(msg syscall.NetlinkMessage) error { + w := bytes.NewBuffer(nil) + msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data)) + msg.Header.Seq = self.seq + self.seq++ + msg.Header.Pid = self.pid + binary.Write(w, binary.LittleEndian, msg.Header) + _, err := w.Write(msg.Data) + if err != nil { + return err + } + _, err = self.Write(w.Bytes()) + return err +} + +func (self *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) { + err = binary.Read(self.rbuf, binary.LittleEndian, &msg.Header) + if err != nil { + return msg, err + } + msg.Data = make([]byte, msg.Header.Len-syscall.NLMSG_HDRLEN) + _, err = self.rbuf.Read(msg.Data) + return msg, err +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/defs.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/defs.go new file mode 100644 index 00000000000..a45d8703a8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/defs.go @@ -0,0 +1,26 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package netlink + +/* +#include +*/ +import "C" + +type TaskStats C.struct_taskstats + +const ( + __TASKSTATS_CMD_MAX = C.__TASKSTATS_CMD_MAX +) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/example/example.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/example/example.go new file mode 100644 index 00000000000..42e60862e3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/example/example.go @@ -0,0 +1,40 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/google/cadvisor/utils/cpuload/netlink" +) + +func main() { + n, err := netlink.New() + if err != nil { + log.Printf("Failed to create cpu load util: %s", err) + return + } + defer n.Stop() + + paths := []string{"/sys/fs/cgroup/cpu", "/sys/fs/cgroup/cpu/docker"} + names := []string{"/", "/docker"} + for i, path := range paths { + stats, err := n.GetCpuLoad(names[i], path) + if err != nil { + log.Printf("Error getting cpu load for %q: %s", path, err) + } + log.Printf("Task load for %s: %+v", path, stats) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go new file mode 100644 index 00000000000..7ca05f361eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go @@ -0,0 +1,241 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "syscall" + + info "github.com/google/cadvisor/info/v1" +) + +const ( + // Kernel constants for tasks stats. + genlIdCtrl = syscall.NLMSG_MIN_TYPE // GENL_ID_CTRL + taskstatsGenlName = "TASKSTATS" // TASKSTATS_GENL_NAME + cgroupStatsCmdAttrFd = 0x1 // CGROUPSTATS_CMD_ATTR_FD + ctrlAttrFamilyId = 0x1 // CTRL_ATTR_FAMILY_ID + ctrlAttrFamilyName = 0x2 // CTRL_ATTR_FAMILY_NAME + ctrlCmdGetFamily = 0x3 // CTRL_CMD_GETFAMILY +) + +var ( + // TODO(rjnagal): Verify and fix for other architectures. 
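+	// Netlink messages use host byte order; little-endian hosts are assumed for now.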
+ Endian = binary.LittleEndian +) + +type genMsghdr struct { + Command uint8 + Version uint8 + Reserved uint16 +} + +type netlinkMessage struct { + Header syscall.NlMsghdr + GenHeader genMsghdr + Data []byte +} + +func (self netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) { + rawmsg.Header = self.Header + w := bytes.NewBuffer([]byte{}) + binary.Write(w, Endian, self.GenHeader) + w.Write(self.Data) + rawmsg.Data = w.Bytes() + return rawmsg +} + +type loadStatsResp struct { + Header syscall.NlMsghdr + GenHeader genMsghdr + Stats info.LoadStats +} + +// Return required padding to align 'size' to 'alignment'. +func padding(size int, alignment int) int { + unalignedPart := size % alignment + return (alignment - unalignedPart) % alignment +} + +// Get family id for taskstats subsystem. +func getFamilyId(conn *Connection) (uint16, error) { + msg := prepareFamilyMessage() + conn.WriteMessage(msg.toRawMsg()) + + resp, err := conn.ReadMessage() + if err != nil { + return 0, err + } + id, err := parseFamilyResp(resp) + if err != nil { + return 0, err + } + return id, nil +} + +// Append an attribute to the message. +// Adds attribute info (length and type), followed by the data and necessary padding. +// Can be called multiple times to add attributes. Only fixed size and string type +// attributes are handled. We don't need nested attributes for task stats. +func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize int) { + attr := syscall.RtAttr{ + Len: syscall.SizeofRtAttr, + Type: attrType, + } + attr.Len += uint16(dataSize) + binary.Write(buf, Endian, attr) + switch data := data.(type) { + case string: + binary.Write(buf, Endian, []byte(data)) + buf.WriteByte(0) // terminate + default: + binary.Write(buf, Endian, data) + } + for i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ { + buf.WriteByte(0) + } +} + +// Prepares the message and generic headers and appends attributes as data. +func prepareMessage(headerType uint16, cmd uint8, attributes []byte) (msg netlinkMessage) { + msg.Header.Type = headerType + msg.Header.Flags = syscall.NLM_F_REQUEST + msg.GenHeader.Command = cmd + msg.GenHeader.Version = 0x1 + msg.Data = attributes + return msg +} + +// Prepares message to query family id for task stats. +func prepareFamilyMessage() (msg netlinkMessage) { + buf := bytes.NewBuffer([]byte{}) + addAttribute(buf, ctrlAttrFamilyName, taskstatsGenlName, len(taskstatsGenlName)+1) + return prepareMessage(genlIdCtrl, ctrlCmdGetFamily, buf.Bytes()) +} + +// Prepares message to query task stats for a task group. +func prepareCmdMessage(id uint16, cfd uintptr) (msg netlinkMessage) { + buf := bytes.NewBuffer([]byte{}) + addAttribute(buf, cgroupStatsCmdAttrFd, uint32(cfd), 4) + return prepareMessage(id, __TASKSTATS_CMD_MAX+1, buf.Bytes()) +} + +// Extracts returned family id from the response. +func parseFamilyResp(msg syscall.NetlinkMessage) (uint16, error) { + m := new(netlinkMessage) + m.Header = msg.Header + err := verifyHeader(msg) + if err != nil { + return 0, err + } + buf := bytes.NewBuffer(msg.Data) + // extract generic header from data. + err = binary.Read(buf, Endian, &m.GenHeader) + if err != nil { + return 0, err + } + id := uint16(0) + // Extract attributes. kernel reports family name, id, version, etc. + // Scan till we find id. 
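+	// Each attribute carries a length/type header followed by a padded payload; attributes other
+	// than the family id are skipped.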
+ for buf.Len() > syscall.SizeofRtAttr { + var attr syscall.RtAttr + err = binary.Read(buf, Endian, &attr) + if err != nil { + return 0, err + } + if attr.Type == ctrlAttrFamilyId { + err = binary.Read(buf, Endian, &id) + if err != nil { + return 0, err + } + return id, nil + } + payload := int(attr.Len) - syscall.SizeofRtAttr + skipLen := payload + padding(payload, syscall.SizeofRtAttr) + name := make([]byte, skipLen) + err = binary.Read(buf, Endian, name) + if err != nil { + return 0, err + } + } + return 0, fmt.Errorf("family id not found in the response.") +} + +// Extract task stats from response returned by kernel. +func parseLoadStatsResp(msg syscall.NetlinkMessage) (*loadStatsResp, error) { + m := new(loadStatsResp) + m.Header = msg.Header + err := verifyHeader(msg) + if err != nil { + return m, err + } + buf := bytes.NewBuffer(msg.Data) + // Scan the general header. + err = binary.Read(buf, Endian, &m.GenHeader) + if err != nil { + return m, err + } + // cgroup stats response should have just one attribute. + // Read it directly into the stats structure. + var attr syscall.RtAttr + err = binary.Read(buf, Endian, &attr) + if err != nil { + return m, err + } + err = binary.Read(buf, Endian, &m.Stats) + if err != nil { + return m, err + } + return m, err +} + +// Verify and return any error reported by kernel. +func verifyHeader(msg syscall.NetlinkMessage) error { + switch msg.Header.Type { + case syscall.NLMSG_DONE: + return fmt.Errorf("expected a response, got nil") + case syscall.NLMSG_ERROR: + buf := bytes.NewBuffer(msg.Data) + var errno int32 + binary.Read(buf, Endian, errno) + return fmt.Errorf("netlink request failed with error %s", syscall.Errno(-errno)) + } + return nil +} + +// Get load stats for a task group. +// id: family id for taskstats. +// fd: fd to path to the cgroup directory under cpu hierarchy. +// conn: open netlink connection used to communicate with kernel. +func getLoadStats(id uint16, fd uintptr, conn *Connection) (info.LoadStats, error) { + msg := prepareCmdMessage(id, fd) + err := conn.WriteMessage(msg.toRawMsg()) + if err != nil { + return info.LoadStats{}, err + } + + resp, err := conn.ReadMessage() + if err != nil { + return info.LoadStats{}, err + } + + parsedmsg, err := parseLoadStatsResp(resp) + if err != nil { + return info.LoadStats{}, err + } + return parsedmsg.Stats, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/reader.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/reader.go new file mode 100644 index 00000000000..89f1d8455a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/netlink/reader.go @@ -0,0 +1,78 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package netlink + +import ( + "fmt" + "os" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v1" +) + +type NetlinkReader struct { + familyId uint16 + conn *Connection +} + +func New() (*NetlinkReader, error) { + conn, err := newConnection() + if err != nil { + return nil, fmt.Errorf("failed to create a new connection: %s", err) + } + + id, err := getFamilyId(conn) + if err != nil { + return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err) + } + glog.V(2).Infof("Family id for taskstats: %d", id) + return &NetlinkReader{ + familyId: id, + conn: conn, + }, nil +} + +func (self *NetlinkReader) Stop() { + if self.conn != nil { + self.conn.Close() + } +} + +func (self *NetlinkReader) Start() error { + // We do the start setup for netlink in New(). Nothing to do here. + return nil +} + +// Returns instantaneous number of running tasks in a group. +// Caller can use historical data to calculate cpu load. +// path is an absolute filesystem path for a container under the CPU cgroup hierarchy. +// NOTE: non-hierarchical load is returned. It does not include load for subcontainers. +func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) { + if len(path) == 0 { + return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty!") + } + + cfd, err := os.Open(path) + if err != nil { + return info.LoadStats{}, fmt.Errorf("failed to open cgroup path %s: %q", path, err) + } + + stats, err := getLoadStats(self.familyId, cfd.Fd(), self.conn) + if err != nil { + return info.LoadStats{}, err + } + glog.V(3).Infof("Task stats for %q: %+v", path, stats) + return stats, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/scheddebug/scheddebug.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/scheddebug/scheddebug.go new file mode 100644 index 00000000000..280578840a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cpuload/scheddebug/scheddebug.go @@ -0,0 +1,231 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheddebug + +import ( + "fmt" + "io/ioutil" + "path" + "regexp" + "sort" + "strconv" + "sync" + "time" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" +) + +const ( + schedDebugPath = "/proc/sched_debug" +) + +var ( + // Scans cpu number, task group name, and number of running threads. + // TODO(rjnagal): cpu number is only used for debug. Remove it later. + schedRegExp = regexp.MustCompile(`cfs_rq\[([0-9]+)\]:(.*)\n(?:.*\n)*?.*nr_running.*: ([0-9]+)`) + selfCgroupRegExp = regexp.MustCompile(`cpu.*:(.*)\n`) + procLoadAvgRegExp = regexp.MustCompile(` ([0-9]+)/`) + pollInterval = 1 * time.Second +) + +type SchedReader struct { + quitChan chan error // Used to cleanly shutdown housekeeping. + lastErrorTime time.Time // Limit errors to one per minute. + selfCgroup string // Cgroup that cAdvisor is running under. 
+ dataLock sync.RWMutex + load map[string]int // Load per container. Guarded by dataLock. +} + +func (self *SchedReader) Start() error { + self.quitChan = make(chan error) + self.refresh() + go self.housekeep() + return nil +} + +func (self *SchedReader) Stop() { + self.quitChan <- nil + err := <-self.quitChan + if err != nil { + glog.Warning("Failed to stop scheddebug load reader: %s", err) + } +} + +// Since load housekeeping and normal container housekeeping runs at the same rate, +// there is a chance of sometimes picking the last cycle's data. We can solve that by +// calling this housekeeping from globalhousekeeping if its an issue. +func (self *SchedReader) housekeep() { + // We start all housekeeping threads around the same time. + // Phase shift load reader thread so it does not poll all housekeeping threads whenever it wakes up. + time.Sleep(500 * time.Millisecond) + ticker := time.Tick(pollInterval) + for { + select { + case <-ticker: + self.refresh() + case <-self.quitChan: + self.quitChan <- nil + glog.Infof("Exiting housekeeping") + return + } + } +} + +func (self *SchedReader) refresh() { + out, err := ioutil.ReadFile(schedDebugPath) + if err != nil { + if self.allowErrorLogging() { + glog.Warningf("Error reading sched debug file %v: %v", schedDebugPath, err) + } + return + } + load := make(map[string]int) + matches := schedRegExp.FindAllSubmatch(out, -1) + for _, matchSlice := range matches { + if len(matchSlice) != 4 { + if self.allowErrorLogging() { + glog.Warningf("Malformed sched debug entry: %v", matchSlice) + } + continue + } + cpu := string(matchSlice[1]) + cgroup := string(matchSlice[2]) + n := string(matchSlice[3]) + numRunning, err := strconv.ParseInt(n, 10, 64) + if err != nil { + if self.allowErrorLogging() { + glog.Warningf("Could not parse running tasks from: %q", n) + } + continue + } + glog.V(3).Infof("Load for %q on cpu %s: %d", cgroup, cpu, numRunning) + if numRunning == 0 { + continue + } + load[cgroup] += int(numRunning) + // detect task group entry from parent's runnable count. + if cgroup != "/" { + parent := getParent(cgroup) + load[parent] -= 1 + } + } + glog.V(3).Infof("New non-hierarchical loads : %+v", load) + // sort the keys and update parents in order. + var cgroups sort.StringSlice + for c := range load { + cgroups = append(cgroups, c) + } + sort.Sort(sort.Reverse(cgroups[:])) + for _, c := range cgroups { + // Add this task groups' processes to its parent. + if c != "/" { + parent := getParent(c) + load[parent] += load[c] + } + // Sometimes we catch a sched dump in middle of an update. + // TODO(rjnagal): Look into why the task hierarchy isn't fully filled sometimes. + if load[c] < 0 { + load[c] = 0 + } + } + // Take off this cAdvisor thread from load calculation. + if self.selfCgroup != "" && load[self.selfCgroup] >= 1 { + load[self.selfCgroup] -= 1 + // Deduct from all parents. 
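+		// Walk up toward the root, removing cAdvisor's own thread from each ancestor's runnable count.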
+ p := self.selfCgroup + for p != "/" { + p = getParent(p) + if load[p] >= 1 { + load[p] -= 1 + } + } + } + glog.V(3).Infof("Derived task group loads : %+v", load) + rootLoad, err := getRootLoad() + if err != nil { + glog.Infof("failed to get root load: %v", err) + } + load["/"] = int(rootLoad) + self.dataLock.Lock() + defer self.dataLock.Unlock() + self.load = load +} + +func (self *SchedReader) GetCpuLoad(name string, path string) (stats info.LoadStats, err error) { + self.dataLock.RLock() + defer self.dataLock.RUnlock() + stats.NrRunning = uint64(self.load[name]) + return stats, nil +} + +func (self *SchedReader) allowErrorLogging() bool { + if time.Since(self.lastErrorTime) > time.Minute { + self.lastErrorTime = time.Now() + return true + } + return false +} + +func getSelfCgroup() (string, error) { + out, err := ioutil.ReadFile("/proc/self/cgroup") + if err != nil { + return "", fmt.Errorf("failed to read cgroup path for cAdvisor: %v", err) + } + matches := selfCgroupRegExp.FindSubmatch(out) + if len(matches) != 2 { + return "", fmt.Errorf("could not find cpu cgroup path in %q", string(out)) + } + return string(matches[1]), nil +} + +func getRootLoad() (int64, error) { + loadFile := "/proc/loadavg" + out, err := ioutil.ReadFile(loadFile) + if err != nil { + return -1, fmt.Errorf("failed to get load from %q: %v", loadFile, err) + } + matches := procLoadAvgRegExp.FindSubmatch(out) + if len(matches) != 2 { + return -1, fmt.Errorf("could not find cpu load in %q", string(out)) + } + numRunning, err := strconv.ParseInt(string(matches[1]), 10, 64) + if err != nil { + return -1, fmt.Errorf("could not parse number of running processes from %q: %v", matches[1], err) + } + numRunning -= 1 + return numRunning, nil +} + +// Return parent cgroup name given an absolute cgroup name. +func getParent(c string) string { + parent := path.Dir(c) + if parent == "." { + parent = "/" + } + return parent +} + +func New() (*SchedReader, error) { + if !utils.FileExists(schedDebugPath) { + return nil, fmt.Errorf("sched debug file %q not accessible", schedDebugPath) + } + selfCgroup, err := getSelfCgroup() + if err != nil { + glog.Infof("failed to get cgroup for cadvisor: %v", err) + } + return &SchedReader{selfCgroup: selfCgroup}, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/fs.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/fs.go new file mode 100644 index 00000000000..d5999a9b30b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/fs.go @@ -0,0 +1,44 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "io" + "os" +) + +type osFS struct{} + +func (osFS) Open(name string) (File, error) { return os.Open(name) } +func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } + +var fs FileSystem = osFS{} + +type FileSystem interface { + Open(name string) (File, error) +} + +type File interface { + io.ReadWriteCloser +} + +// Useful for tests. 
Not thread safe. +func ChangeFileSystem(filesystem FileSystem) { + fs = filesystem +} + +func Open(name string) (File, error) { + return fs.Open(name) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/fakefile.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/fakefile.go new file mode 100644 index 00000000000..77a3f48734b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/fakefile.go @@ -0,0 +1,35 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockfs + +import "bytes" + +type FakeFile struct { + bytes.Buffer + Name string +} + +func (self *FakeFile) Close() error { + return nil +} + +func AddTextFile(mockfs *MockFileSystem, name, content string) *FakeFile { + f := &FakeFile{ + Name: name, + Buffer: *bytes.NewBufferString(content), + } + mockfs.EXPECT().Open(name).Return(f, nil).AnyTimes() + return f +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go new file mode 100644 index 00000000000..93f08a686e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go @@ -0,0 +1,55 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Automatically generated by MockGen. DO NOT EDIT! 
+// Source: github.com/google/cadvisor/utils/fs (interfaces: FileSystem) + +package mockfs + +import ( + gomock "code.google.com/p/gomock/gomock" + fs "github.com/google/cadvisor/utils/fs" +) + +// Mock of FileSystem interface +type MockFileSystem struct { + ctrl *gomock.Controller + recorder *_MockFileSystemRecorder +} + +// Recorder for MockFileSystem (not exported) +type _MockFileSystemRecorder struct { + mock *MockFileSystem +} + +func NewMockFileSystem(ctrl *gomock.Controller) *MockFileSystem { + mock := &MockFileSystem{ctrl: ctrl} + mock.recorder = &_MockFileSystemRecorder{mock} + return mock +} + +func (_m *MockFileSystem) EXPECT() *_MockFileSystemRecorder { + return _m.recorder +} + +func (_m *MockFileSystem) Open(_param0 string) (fs.File, error) { + ret := _m.ctrl.Call(_m, "Open", _param0) + ret0, _ := ret[0].(fs.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockFileSystemRecorder) Open(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Open", arg0) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/containerOomExampleLog.txt b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/containerOomExampleLog.txt new file mode 100644 index 00000000000..be6632e6417 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/containerOomExampleLog.txt @@ -0,0 +1,44 @@ +Jan 5 15:19:01 CRON[14500]: (root) CMD (touch /var/run/crond.sittercheck) +Jan 5 15:19:04 cookie_monster[1249]: uid 0, pid 14504, "/var/lib/certs/machine_cert.crt" accessed by exe "/usr/bin/nsscacheclient", cwd "/root", comm "/usr/bin/nsscacheclient" +Jan 5 15:19:04 cookie_monster[1249]: uid 0, pid 14504, "/var/lib/certs/machine_cert.key" accessed by exe "/usr/bin/nsscacheclient", cwd "/root", comm "/usr/bin/nsscacheclient" +Jan 5 15:19:05 nsscacheclient[14504]: SUCCESS: Completed run (v29/c20 rtime:0.334299 utime:0.136923 stime:0.011736 maxrss:5260k dials:1 sent:1793 rcvd:5143). +Jan 5 15:19:27 kernel: [ 5864.708440] memorymonster invoked oom-killer: gfp_mask=0xd0, order=0, oom_score_adj=0 +Jan 5 15:19:27 kernel: [ 5864.708443] memorymonster cpuset=/ mems_allowed=0 +Jan 5 15:19:27 kernel: [ 5864.708446] CPU: 5 PID: 13536 Comm: memorymonster Tainted: P OX 3.13.0-43-generic #72-Ubuntu +Jan 5 15:19:27 kernel: [ 5864.708447] Hardware name: Hewlett-Packard HP Z420 Workstation/1589, BIOS J61 v03.65 12/19/2013 +Jan 5 15:19:27 kernel: [ 5864.708448] ffff88072ae10800 ffff8807a4835c48 ffffffff81720bf6 ffff8807a8e86000 +Jan 5 15:19:27 kernel: [ 5864.708451] ffff8807a4835cd0 ffffffff8171b4b1 0000000000000246 ffff88072ae10800 +Jan 5 15:19:27 kernel: [ 5864.708453] ffff8807a4835c90 ffff8807a4835ca0 ffffffff811522a7 0000000000000001 +Jan 5 15:19:27 kernel: [ 5864.708455] Call Trace: +Jan 5 15:19:27 kernel: [ 5864.708460] [] dump_stack+0x45/0x56 +Jan 5 15:19:27 kernel: [ 5864.708463] [] dump_header+0x7f/0x1f1 +Jan 5 15:19:27 kernel: [ 5864.708465] [] ? find_lock_task_mm+0x27/0x70 +Jan 5 15:19:27 kernel: [ 5864.708467] [] oom_kill_process+0x1ce/0x330 +Jan 5 15:19:27 kernel: [ 5864.708470] [] ? security_capable_noaudit+0x15/0x20 +Jan 5 15:19:27 kernel: [ 5864.708474] [] mem_cgroup_oom_synchronize+0x51c/0x560 +Jan 5 15:19:27 kernel: [ 5864.708476] [] ? 
mem_cgroup_charge_common+0xa0/0xa0 +Jan 5 15:19:27 kernel: [ 5864.708478] [] pagefault_out_of_memory+0x14/0x80 +Jan 5 15:19:27 kernel: [ 5864.708480] [] mm_fault_error+0x8e/0x180 +Jan 5 15:19:27 kernel: [ 5864.708482] [] __do_page_fault+0x4a1/0x560 +Jan 5 15:19:27 kernel: [ 5864.708485] [] ? set_next_entity+0x95/0xb0 +Jan 5 15:19:27 kernel: [ 5864.708489] [] ? __switch_to+0x169/0x4c0 +Jan 5 15:19:27 kernel: [ 5864.708490] [] do_page_fault+0x1a/0x70 +Jan 5 15:19:27 kernel: [ 5864.708492] [] page_fault+0x28/0x30 +Jan 5 15:19:27 kernel: [ 5864.708493] Task in /mem2 killed as a result of limit of /mem2 +Jan 5 15:19:27 kernel: [ 5864.708495] memory: usage 980kB, limit 980kB, failcnt 4152239 +Jan 5 15:19:27 kernel: [ 5864.708495] memory+swap: usage 0kB, limit 18014398509481983kB, failcnt 0 +Jan 5 15:19:27 kernel: [ 5864.708496] kmem: usage 0kB, limit 18014398509481983kB, failcnt 0 +Jan 5 15:19:27 kernel: [ 5864.708497] Memory cgroup stats for /mem2: cache:0KB rss:980KB rss_huge:0KB mapped_file:0KB writeback:20KB inactive_anon:560KB active_anon:420KB inactive_file:0KB active_file:0KB unevictable:0KB +Jan 5 15:19:27 kernel: [ 5864.708505] [ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name +Jan 5 15:19:27 kernel: [ 5864.708600] [13536] 275858 13536 8389663 343 16267 8324326 0 memorymonster +Jan 5 15:19:27 kernel: [ 5864.708607] Memory cgroup out of memory: Kill process 13536 (memorymonster) score 996 or sacrifice child +Jan 5 15:19:27 kernel: [ 5864.708608] Killed process 13536 (memorymonster) total-vm:33558652kB, anon-rss:920kB, file-rss:452kB +Jan 5 15:20:01 CRON[14608]: (root) CMD (touch /var/run/crond.sittercheck) +Jan 5 15:20:01 CRON[14609]: (root) CMD (/usr/bin/alarm 6000 /usr/share/update-notifier/reevaluate.py) +Jan 5 15:20:01 CRON[14610]: (root) CMD (/usr/bin/corp_cronwrap -j 80 -t 600 -A -K -L -l 'nsscache-client' /usr/bin/nsscacheclient all) +Jan 5 15:20:01 /usr/bin/lock: called by /bin/bash for . uid 0, euid 0. +Jan 5 15:21:01 CRON[14639]: (root) CMD (touch /var/run/crond.sittercheck) +Jan 5 15:21:05 cookie_monster[1249]: uid 0, pid 14643, "/var/lib/certs/machine_cert.crt" accessed by exe "/usr/bin/nsscacheclient", cwd "/root", comm "/usr/bin/nsscacheclient" +Jan 5 15:21:05 cookie_monster[1249]: uid 0, pid 14643, "/var/lib/certs/machine_cert.key" accessed by exe "/usr/bin/nsscacheclient", cwd "/root", comm "/usr/bin/nsscacheclient" +Jan 5 15:21:05 nsscacheclient[14643]: auto.auto(no change) time:0.042264697000000004 retries:0 +Jan 5 15:21:05 nsscacheclient[14643]: auto.home(63c07d09->8686499b write:3631382) time:0.318774602 retries:0 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oominfo/main.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oominfo/main.go new file mode 100644 index 00000000000..208d9d9067f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oominfo/main.go @@ -0,0 +1,44 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + + "github.com/golang/glog" + "github.com/google/cadvisor/utils/oomparser" +) + +// demonstrates how to run oomparser.OomParser to get OomInstance information +func main() { + flag.Parse() + // out is a user-provided channel from which the user can read incoming + // OomInstance objects + outStream := make(chan *oomparser.OomInstance) + oomLog, err := oomparser.New() + if err != nil { + glog.Infof("Couldn't make a new oomparser. %v", err) + } else { + err := oomLog.StreamOoms(outStream) + if err != nil { + glog.Errorf("%v", err) + } + // demonstration of how to get oomLog's list of oomInstances or access + // the user-declared oomInstance channel, here called outStream + for oomInstance := range outStream { + glog.Infof("Reading the buffer. Output is %v", oomInstance) + } + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser.go new file mode 100644 index 00000000000..54835915552 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser.go @@ -0,0 +1,180 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oomparser + +import ( + "bufio" + "fmt" + "io" + "os" + "path" + "regexp" + "strconv" + "time" + + "github.com/golang/glog" + "github.com/google/cadvisor/utils" +) + +var containerRegexp *regexp.Regexp = regexp.MustCompile( + `Task in (.*) killed as a result of limit of `) +var lastLineRegexp *regexp.Regexp = regexp.MustCompile( + `(^[A-Z]{1}[a-z]{2} .*[0-9]{1,2} [0-9]{1,2}:[0-9]{2}:[0-9]{2}) .* Killed process ([0-9]+) \(([0-9A-Za-z_]+)\)`) +var firstLineRegexp *regexp.Regexp = regexp.MustCompile( + `invoked oom-killer:`) + +// struct to hold file from which we obtain OomInstances +type OomParser struct { + systemFile string +} + +// struct that contains information related to an OOM kill instance +type OomInstance struct { + // process id of the killed process + Pid int + // the name of the killed process + ProcessName string + // the time that the process was reported to be killed, + // accurate to the minute + TimeOfDeath time.Time + // the absolute name of the container that OOMed + ContainerName string +} + +// gets the container name from a line and adds it to the oomInstance. 
+func getContainerName(line string, currentOomInstance *OomInstance) error { + parsedLine := containerRegexp.FindStringSubmatch(line) + if parsedLine == nil { + return nil + } + currentOomInstance.ContainerName = path.Join("/", parsedLine[1]) + return nil +} + +// gets the pid, name, and date from a line and adds it to oomInstance +func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) { + reList := lastLineRegexp.FindStringSubmatch(line) + if reList == nil { + return false, nil + } + const longForm = "Jan _2 15:04:05 2006" + stringYear := strconv.Itoa(time.Now().Year()) + linetime, err := time.Parse(longForm, reList[1]+" "+stringYear) + if err != nil { + return false, err + } + currentOomInstance.TimeOfDeath = linetime + if err != nil { + return false, err + } + pid, err := strconv.Atoi(reList[2]) + if err != nil { + return false, err + } + currentOomInstance.Pid = pid + currentOomInstance.ProcessName = reList[3] + return true, nil +} + +// uses regex to see if line is the start of a kernel oom log +func checkIfStartOfOomMessages(line string) bool { + potential_oom_start := firstLineRegexp.MatchString(line) + if potential_oom_start { + return true + } + return false +} + +// opens a reader to grab new messages from the Reader object called outPipe +// opened in PopulateOomInformation. It reads line by line splitting on +// the "\n" character. Checks if line might be start or end of an oom message +// log. Then the +// lines are checked against a regexp to check for the pid, process name, etc. +// At the end of an oom message group, AnalyzeLines adds the new oomInstance to +// oomLog +func (self *OomParser) analyzeLines(ioreader *bufio.Reader, outStream chan *OomInstance) { + var line string + var err error + for true { + for line, err = ioreader.ReadString('\n'); err != nil && err == io.EOF; { + time.Sleep(100 * time.Millisecond) + } + in_oom_kernel_log := checkIfStartOfOomMessages(line) + if in_oom_kernel_log { + oomCurrentInstance := &OomInstance{ + ContainerName: "/", + } + finished := false + for err == nil && !finished { + err = getContainerName(line, oomCurrentInstance) + if err != nil { + glog.Errorf("%v", err) + } + finished, err = getProcessNamePid(line, oomCurrentInstance) + if err != nil { + glog.Errorf("%v", err) + } + line, err = ioreader.ReadString('\n') + } + in_oom_kernel_log = false + outStream <- oomCurrentInstance + } + } +} + +// looks for system files that contain kernel messages and if one is found, sets +// the systemFile attribute of the OomParser object +func getSystemFile() (string, error) { + const varLogMessages = "/var/log/messages" + const varLogSyslog = "/var/log/syslog" + if utils.FileExists(varLogMessages) { + return varLogMessages, nil + } else if utils.FileExists(varLogSyslog) { + return varLogSyslog, nil + } + return "", fmt.Errorf("neither %s nor %s exists from which to read kernel errors", varLogMessages, varLogSyslog) +} + +// calls a go routine that populates self.OomInstances and fills the argument +// channel with OomInstance objects as they are read from the file. +// opens the OomParser's systemFile which was set in getSystemFile +// to look for OOM messages by calling AnalyzeLines. Takes in the argument +// outStream, which is passed in by the user and passed to AnalyzeLines. 
+// OomInstance objects are added to outStream when they are found by +// AnalyzeLines +func (self *OomParser) StreamOoms(outStream chan *OomInstance) error { + file, err := os.Open(self.systemFile) + if err != nil { + return err + } + ioreader := bufio.NewReader(file) + + // Process the events received from the kernel. + go func() { + self.analyzeLines(ioreader, outStream) + }() + return nil +} + +// initializes an OomParser object and calls getSystemFile to set the systemFile +// attribute. Returns and OomParser object and an error +func New() (*OomParser, error) { + systemFileName, err := getSystemFile() + if err != nil { + return nil, err + } + return &OomParser{ + systemFile: systemFileName}, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser_test.go new file mode 100644 index 00000000000..93835861156 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/oomparser_test.go @@ -0,0 +1,198 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oomparser + +import ( + "bufio" + "os" + "testing" + "time" +) + +const startLine = "Jan 21 22:01:49 localhost kernel: [62278.816267] ruby invoked oom-killer: gfp_mask=0x201da, order=0, oom_score_adj=0" +const endLine = "Jan 21 22:01:49 localhost kernel: [62279.421192] Killed process 19667 (evilprogram2) total-vm:1460016kB, anon-rss:1414008kB, file-rss:4kB" +const containerLine = "Jan 26 14:10:07 kateknister0.mtv.corp.google.com kernel: [1814368.465205] Task in /mem2 killed as a result of limit of /mem2" +const containerLogFile = "containerOomExampleLog.txt" +const systemLogFile = "systemOomExampleLog.txt" + +func createExpectedContainerOomInstance(t *testing.T) *OomInstance { + const longForm = "Jan _2 15:04:05 2006" + deathTime, err := time.Parse(longForm, "Jan 5 15:19:27 2015") + if err != nil { + t.Fatalf("could not parse expected time when creating expected container oom instance. Had error %v", err) + return nil + } + return &OomInstance{ + Pid: 13536, + ProcessName: "memorymonster", + TimeOfDeath: deathTime, + ContainerName: "/mem2", + } +} + +func createExpectedSystemOomInstance(t *testing.T) *OomInstance { + const longForm = "Jan _2 15:04:05 2006" + deathTime, err := time.Parse(longForm, "Jan 28 19:58:45 2015") + if err != nil { + t.Fatalf("could not parse expected time when creating expected system oom instance. 
Had error %v", err) + return nil + } + return &OomInstance{ + Pid: 1532, + ProcessName: "badsysprogram", + TimeOfDeath: deathTime, + ContainerName: "/", + } +} + +func TestGetContainerName(t *testing.T) { + currentOomInstance := new(OomInstance) + err := getContainerName(startLine, currentOomInstance) + if err != nil { + t.Errorf("bad line fed to getContainerName should yield no error, but had error %v", err) + } + if currentOomInstance.ContainerName != "" { + t.Errorf("bad line fed to getContainerName yielded no container name but set it to %s", currentOomInstance.ContainerName) + } + err = getContainerName(containerLine, currentOomInstance) + if err != nil { + t.Errorf("container line fed to getContainerName should yield no error, but had error %v", err) + } + if currentOomInstance.ContainerName != "/mem2" { + t.Errorf("getContainerName should have set containerName to /mem2, not %s", currentOomInstance.ContainerName) + } +} + +func TestGetProcessNamePid(t *testing.T) { + currentOomInstance := new(OomInstance) + couldParseLine, err := getProcessNamePid(startLine, currentOomInstance) + if err != nil { + t.Errorf("bad line fed to getProcessNamePid should yield no error, but had error %v", err) + } + if couldParseLine { + t.Errorf("bad line fed to getProcessNamePid should return false but returned %v", couldParseLine) + } + + const longForm = "Jan _2 15:04:05 2006" + correctTime, err := time.Parse(longForm, "Jan 21 22:01:49 2015") + couldParseLine, err = getProcessNamePid(endLine, currentOomInstance) + if err != nil { + t.Errorf("good line fed to getProcessNamePid should yield no error, but had error %v", err) + } + if !couldParseLine { + t.Errorf("good line fed to getProcessNamePid should return true but returned %v", couldParseLine) + } + if currentOomInstance.ProcessName != "evilprogram2" { + t.Errorf("getProcessNamePid should have set processName to evilprogram2, not %s", currentOomInstance.ProcessName) + } + if currentOomInstance.Pid != 19667 { + t.Errorf("getProcessNamePid should have set PID to 19667, not %d", currentOomInstance.Pid) + } + if !correctTime.Equal(currentOomInstance.TimeOfDeath) { + t.Errorf("getProcessNamePid should have set date to %v, not %v", correctTime, currentOomInstance.TimeOfDeath) + } +} + +func TestCheckIfStartOfMessages(t *testing.T) { + couldParseLine := checkIfStartOfOomMessages(endLine) + if couldParseLine { + t.Errorf("bad line fed to checkIfStartOfMessages should return false but returned %v", couldParseLine) + } + couldParseLine = checkIfStartOfOomMessages(startLine) + if !couldParseLine { + t.Errorf("start line fed to checkIfStartOfMessages should return true but returned %v", couldParseLine) + } +} + +func TestAnalyzeLinesContainerOom(t *testing.T) { + expectedContainerOomInstance := createExpectedContainerOomInstance(t) + helpTestAnalyzeLines(expectedContainerOomInstance, containerLogFile, t) +} + +func TestAnalyzeLinesSystemOom(t *testing.T) { + expectedSystemOomInstance := createExpectedSystemOomInstance(t) + helpTestAnalyzeLines(expectedSystemOomInstance, systemLogFile, t) +} + +func helpTestAnalyzeLines(oomCheckInstance *OomInstance, sysFile string, t *testing.T) { + outStream := make(chan *OomInstance) + oomLog := new(OomParser) + oomLog.systemFile = sysFile + file, err := os.Open(oomLog.systemFile) + if err != nil { + t.Errorf("couldn't open test log: %v", err) + } + ioreader := bufio.NewReader(file) + timeout := make(chan bool, 1) + go func() { + time.Sleep(1 * time.Second) + timeout <- true + }() + go oomLog.analyzeLines(ioreader, outStream) 
+ select { + case oomInstance := <-outStream: + if *oomCheckInstance != *oomInstance { + t.Errorf("wrong instance returned. Expected %v and got %v", + oomCheckInstance, oomInstance) + } + case <-timeout: + t.Error( + "timeout happened before oomInstance was found in test file") + } +} + +func TestStreamOomsContainer(t *testing.T) { + expectedContainerOomInstance := createExpectedContainerOomInstance(t) + helpTestStreamOoms(expectedContainerOomInstance, containerLogFile, t) +} + +func TestStreamOomsSystem(t *testing.T) { + expectedSystemOomInstance := createExpectedSystemOomInstance(t) + helpTestStreamOoms(expectedSystemOomInstance, systemLogFile, t) +} + +func helpTestStreamOoms(oomCheckInstance *OomInstance, sysFile string, t *testing.T) { + outStream := make(chan *OomInstance) + oomLog := new(OomParser) + oomLog.systemFile = sysFile + timeout := make(chan bool, 1) + go func() { + time.Sleep(1 * time.Second) + timeout <- true + }() + + err := oomLog.StreamOoms(outStream) + if err != nil { + t.Errorf("had an error opening file: %v", err) + } + + select { + case oomInstance := <-outStream: + if *oomCheckInstance != *oomInstance { + t.Errorf("wrong instance returned. Expected %v and got %v", + oomCheckInstance, oomInstance) + } + case <-timeout: + t.Error( + "timeout happened before oomInstance was found in test file") + } +} + +func TestNew(t *testing.T) { + _, err := New() + if err != nil { + t.Errorf("function New() had error %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/systemOomExampleLog.txt b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/systemOomExampleLog.txt new file mode 100644 index 00000000000..9d38bbdee9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/oomparser/systemOomExampleLog.txt @@ -0,0 +1,362 @@ +[ 0.000000] SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1 +[ 0.000000] Hierarchical RCU implementation. +[ 0.000000] RCU dyntick-idle grace-period acceleration is enabled. +[ 0.000000] RCU restricting CPUs from NR_CPUS=256 to nr_cpu_ids=1. +[ 0.000000] Offload RCU callbacks from all CPUs +[ 0.000000] Offload RCU callbacks from CPUs: 0. +[ 0.000000] NR_IRQS:16640 nr_irqs:256 16 +[ 0.000000] Console: colour dummy device 80x25 +[ 0.000000] console [ttyS0] enabled +[ 0.000000] allocated 7340032 bytes of page_cgroup +[ 0.000000] please try 'cgroup_disable=memory' option if you don't want memory cgroups +[ 0.000000] tsc: Detected 2500.000 MHz processor +[ 0.008000] Calibrating delay loop (skipped) preset value.. 5000.00 BogoMIPS (lpj=10000000) +[ 0.008000] pid_max: default: 32768 minimum: 301 +[ 0.008000] Security Framework initialized +[ 0.008000] AppArmor: AppArmor initialized +[ 0.008000] Yama: becoming mindful. 
+[ 0.008200] Dentry cache hash table entries: 262144 (order: 9, 2097152 bytes) +[ 0.011365] Inode-cache hash table entries: 131072 (order: 8, 1048576 bytes) +[ 0.013066] Mount-cache hash table entries: 4096 (order: 3, 32768 bytes) +[ 0.014030] Mountpoint-cache hash table entries: 4096 (order: 3, 32768 bytes) +[ 0.016266] Initializing cgroup subsys memory +[ 0.016898] Initializing cgroup subsys devices +[ 0.017546] Initializing cgroup subsys freezer +[ 0.018193] Initializing cgroup subsys blkio +[ 0.018793] Initializing cgroup subsys perf_event +[ 0.019416] Initializing cgroup subsys hugetlb +[ 0.020067] Disabled fast string operations +[ 0.020681] CPU: Physical Processor ID: 0 +[ 0.021238] CPU: Processor Core ID: 0 +[ 0.022587] mce: CPU supports 32 MCE banks +[ 0.023260] Last level iTLB entries: 4KB 512, 2MB 0, 4MB 0 +[ 0.023260] Last level dTLB entries: 4KB 512, 2MB 0, 4MB 0 +[ 0.023260] tlb_flushall_shift: 6 +[ 0.043758] Freeing SMP alternatives memory: 32K (ffffffff81e6c000 - ffffffff81e74000) +[ 0.048361] ACPI: Core revision 20131115 +[ 0.049516] ACPI: All ACPI Tables successfully acquired +[ 0.050342] ftrace: allocating 28458 entries in 112 pages +[ 0.060327] Enabling x2apic +[ 0.060740] Enabled x2apic +[ 0.064005] Switched APIC routing to physical x2apic. +[ 0.065489] ..TIMER: vector=0x30 apic1=0 pin1=0 apic2=-1 pin2=-1 +[ 0.066331] smpboot: CPU0: Intel(R) Xeon(R) CPU @ 2.50GHz (fam: 06, model: 3e, stepping: 04) +[ 0.072000] APIC calibration not consistent with PM-Timer: 227ms instead of 100ms +[ 0.072000] APIC delta adjusted to PM-Timer: 6250028 (14249259) +[ 0.074382] Performance Events: unsupported p6 CPU model 62 no PMU driver, software events only. +[ 0.077174] x86: Booted up 1 node, 1 CPUs +[ 0.077738] smpboot: Total of 1 processors activated (5000.00 BogoMIPS) +[ 0.078932] NMI watchdog: disabled (cpu0): hardware events not enabled +[ 0.079945] devtmpfs: initialized +[ 0.081784] EVM: security.selinux +[ 0.082251] EVM: security.SMACK64 +[ 0.082720] EVM: security.ima +[ 0.083135] EVM: security.capability +[ 0.084729] pinctrl core: initialized pinctrl subsystem +[ 0.085517] regulator-dummy: no parameters +[ 0.086187] RTC time: 19:51:09, date: 01/28/15 +[ 0.086869] NET: Registered protocol family 16 +[ 0.087613] cpuidle: using governor ladder +[ 0.088009] cpuidle: using governor menu +[ 0.088580] ACPI: bus type PCI registered +[ 0.089191] acpiphp: ACPI Hot Plug PCI Controller Driver version: 0.5 +[ 0.090220] PCI: Using configuration type 1 for base access +[ 0.091749] bio: create slab at 0 +[ 0.092215] ACPI: Added _OSI(Module Device) +[ 0.092799] ACPI: Added _OSI(Processor Device) +[ 0.093410] ACPI: Added _OSI(3.0 _SCP Extensions) +[ 0.094173] ACPI: Added _OSI(Processor Aggregator Device) +[ 0.096962] ACPI: Interpreter enabled +[ 0.097483] ACPI Exception: AE_NOT_FOUND, While evaluating Sleep State [\_S1_] (20131115/hwxface-580) +[ 0.098762] ACPI Exception: AE_NOT_FOUND, While evaluating Sleep State [\_S2_] (20131115/hwxface-580) +[ 0.100011] ACPI: (supports S0 S3 S4 S5) +[ 0.100555] ACPI: Using IOAPIC for interrupt routing +[ 0.101252] PCI: Using host bridge windows from ACPI; if necessary, use "pci=nocrs" and report a bug +[ 0.102545] ACPI: No dock devices found. 
+[ 0.105210] ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-ff]) +[ 0.106060] acpi PNP0A03:00: _OSC: OS supports [ASPM ClockPM Segments MSI] +[ 0.108025] acpi PNP0A03:00: _OSC failed (AE_NOT_FOUND); disabling ASPM +[ 0.109116] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge. +[ 0.112685] PCI host bridge to bus 0000:00 +[ 0.113294] pci_bus 0000:00: root bus resource [bus 00-ff] +[ 0.114054] pci_bus 0000:00: root bus resource [io 0x0000-0x0cf7] +[ 0.115065] pci_bus 0000:00: root bus resource [io 0x0d00-0xffff] +[ 0.116004] pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000bffff] +[ 0.116955] pci_bus 0000:00: root bus resource [mem 0x6cc00000-0xfebfffff] +[ 0.117916] pci 0000:00:01.0: [8086:7110] type 00 class 0x060100 +[ 0.122089] pci 0000:00:01.3: [8086:7113] type 00 class 0x068000 +[ 0.125713] pci 0000:00:01.3: quirk: [io 0xb000-0xb03f] claimed by PIIX4 ACPI +[ 0.127117] pci 0000:00:03.0: [1af4:1004] type 00 class 0x000000 +[ 0.128752] pci 0000:00:03.0: reg 0x10: [io 0xc000-0xc03f] +[ 0.130322] pci 0000:00:03.0: reg 0x14: [mem 0xfebfe000-0xfebfe07f] +[ 0.133571] pci 0000:00:04.0: [1af4:1000] type 00 class 0x020000 +[ 0.135267] pci 0000:00:04.0: reg 0x10: [io 0xc040-0xc07f] +[ 0.136777] pci 0000:00:04.0: reg 0x14: [mem 0xfebff000-0xfebff03f] +[ 0.140811] ACPI: PCI Interrupt Link [LNKA] (IRQs 5 *10 11) +[ 0.141879] ACPI: PCI Interrupt Link [LNKB] (IRQs 5 *10 11) +[ 0.142886] ACPI: PCI Interrupt Link [LNKC] (IRQs 5 10 *11) +[ 0.144086] ACPI: PCI Interrupt Link [LNKD] (IRQs 5 10 *11) +[ 0.145067] ACPI: PCI Interrupt Link [LNKS] (IRQs *9) +[ 0.146245] ACPI: Enabled 16 GPEs in block 00 to 0F +[ 0.147038] ACPI: \_SB_.PCI0: notify handler is installed +[ 0.147840] Found 1 acpi root devices +[ 0.148136] vgaarb: loaded +[ 0.148780] SCSI subsystem initialized +[ 0.149472] libata version 3.00 loaded. 
+[ 0.150070] ACPI: bus type USB registered +[ 0.150659] usbcore: registered new interface driver usbfs +[ 0.151536] usbcore: registered new interface driver hub +[ 0.152055] usbcore: registered new device driver usb +[ 0.153144] PCI: Using ACPI for IRQ routing +[ 0.153756] PCI: pci_cache_line_size set to 64 bytes +[ 0.154617] e820: reserve RAM buffer [mem 0x0009fc00-0x0009ffff] +[ 0.156004] e820: reserve RAM buffer [mem 0x6cbfe000-0x6fffffff] +[ 0.156993] NetLabel: Initializing +[ 0.157498] NetLabel: domain hash size = 128 +[ 0.158082] NetLabel: protocols = UNLABELED CIPSOv4 +[ 0.158815] NetLabel: unlabeled traffic allowed by default +[ 0.160005] Switched to clocksource kvm-clock +[ 0.168695] AppArmor: AppArmor Filesystem Enabled +[ 0.169361] pnp: PnP ACPI init +[ 0.169853] ACPI: bus type PNP registered +[ 0.170499] pnp 00:00: Plug and Play ACPI device, IDs PNP0b00 (active) +[ 0.171591] pnp 00:01: Plug and Play ACPI device, IDs PNP0501 (active) +[ 0.172574] pnp 00:02: Plug and Play ACPI device, IDs PNP0501 (active) +[ 0.173782] pnp: PnP ACPI: found 3 devices +[ 0.174430] ACPI: bus type PNP unregistered +[ 0.181364] pci_bus 0000:00: resource 4 [io 0x0000-0x0cf7] +[ 0.182172] pci_bus 0000:00: resource 5 [io 0x0d00-0xffff] +[ 0.183049] pci_bus 0000:00: resource 6 [mem 0x000a0000-0x000bffff] +[ 0.184120] pci_bus 0000:00: resource 7 [mem 0x6cc00000-0xfebfffff] +[ 0.185051] NET: Registered protocol family 2 +[ 0.185859] TCP established hash table entries: 16384 (order: 5, 131072 bytes) +[ 0.187117] TCP bind hash table entries: 16384 (order: 6, 262144 bytes) +[ 0.188393] TCP: Hash tables configured (established 16384 bind 16384) +[ 0.189429] TCP: reno registered +[ 0.189929] UDP hash table entries: 1024 (order: 3, 32768 bytes) +[ 0.190824] UDP-Lite hash table entries: 1024 (order: 3, 32768 bytes) +[ 0.191830] NET: Registered protocol family 1 +[ 0.192585] PCI: CLS 0 bytes, default 64 +[ 0.193412] Trying to unpack rootfs image as initramfs... 
+[ 0.897565] Freeing initrd memory: 18780K (ffff880035b42000 - ffff880036d99000) +[ 0.898982] microcode: CPU0 sig=0x306e4, pf=0x1, revision=0x1 +[ 0.899884] microcode: Microcode Update Driver: v2.00 , Peter Oruba +[ 0.901196] Scanning for low memory corruption every 60 seconds +[ 0.902497] Initialise system trusted keyring +[ 0.903169] audit: initializing netlink socket (disabled) +[ 0.904016] type=2000 audit(1422474669.702:1): initialized +[ 0.926617] HugeTLB registered 2 MB page size, pre-allocated 0 pages +[ 0.928567] zbud: loaded +[ 0.929030] VFS: Disk quotas dquot_6.5.2 +[ 0.929685] Dquot-cache hash table entries: 512 (order 0, 4096 bytes) +[ 0.931113] fuse init (API version 7.22) +[ 0.931781] msgmni has been set to 3390 +[ 0.932595] Key type big_key registered +[ 0.933680] Key type asymmetric registered +[ 0.934332] Asymmetric key parser 'x509' registered +[ 0.935078] Block layer SCSI generic (bsg) driver version 0.4 loaded (major 252) +[ 0.936224] io scheduler noop registered +[ 0.936858] io scheduler deadline registered (default) +[ 0.937675] io scheduler cfq registered +[ 0.938307] pci_hotplug: PCI Hot Plug PCI Core version: 0.5 +[ 0.939158] pciehp: PCI Express Hot Plug Controller Driver version: 0.4 +[ 0.940239] efifb: probing for efifb +[ 0.940788] efifb: framebuffer at 0xa0000, mapped to 0xffff8800000a0000, using 64k, total 64k +[ 0.942044] efifb: mode is 640x480x1, linelength=80, pages=1 +[ 0.942964] efifb: scrolling: redraw +[ 0.943525] efifb: Truecolor: size=8:8:8:8, shift=24:16:8:0 +[ 0.945209] Console: switching to colour frame buffer device 80x30 +[ 0.946826] fb0: EFI VGA frame buffer device +[ 0.947485] intel_idle: does not run on family 6 model 62 +[ 0.948380] ipmi message handler version 39.2 +[ 0.949036] input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0 +[ 0.950135] ACPI: Power Button [PWRF] +[ 0.950722] input: Sleep Button as /devices/LNXSYSTM:00/LNXSLPBN:00/input/input1 +[ 0.951773] ACPI: Sleep Button [SLPF] +[ 0.952529] GHES: HEST is not enabled! +[ 0.953921] ACPI: PCI Interrupt Link [LNKC] enabled at IRQ 11 +[ 0.955783] ACPI: PCI Interrupt Link [LNKD] enabled at IRQ 10 +[ 0.957395] Serial: 8250/16550 driver, 32 ports, IRQ sharing enabled +[ 1.112167] 00:01: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A +[ 1.134843] 00:02: ttyS1 at I/O 0x2f8 (irq = 3, base_baud = 115200) is a 16550A +[ 1.137110] Linux agpgart interface v0.103 +[ 1.138975] brd: module loaded +[ 1.140117] loop: module loaded +[ 1.140923] libphy: Fixed MDIO Bus: probed +[ 1.141640] tun: Universal TUN/TAP device driver, 1.6 +[ 1.142342] tun: (C) 1999-2004 Max Krasnyansky +[ 1.144063] virtio-pci 0000:00:04.0: irq 40 for MSI/MSI-X +[ 1.144871] virtio-pci 0000:00:04.0: irq 41 for MSI/MSI-X +[ 1.145670] virtio-pci 0000:00:04.0: irq 42 for MSI/MSI-X +[ 1.151673] PPP generic driver version 2.4.2 +[ 1.152344] ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver +[ 1.153399] ehci-pci: EHCI PCI platform driver +[ 1.154021] ehci-platform: EHCI generic platform driver +[ 1.154939] ohci_hcd: USB 1.1 'Open' Host Controller (OHCI) Driver +[ 1.155973] ohci-pci: OHCI PCI platform driver +[ 1.156675] ohci-platform: OHCI generic platform driver +[ 1.157423] uhci_hcd: USB Universal Host Controller Interface driver +[ 1.158352] i8042: PNP: No PS/2 controller found. Probing ports directly. 
+[ 3.646820] i8042: No controller found +[ 3.647493] tsc: Refined TSC clocksource calibration: 2500.002 MHz +[ 3.648490] mousedev: PS/2 mouse device common for all mice +[ 3.649499] rtc_cmos 00:00: RTC can wake from S4 +[ 3.650595] rtc_cmos 00:00: rtc core: registered rtc_cmos as rtc0 +[ 3.651521] rtc_cmos 00:00: alarms up to one day, 114 bytes nvram +[ 3.652422] device-mapper: uevent: version 1.0.3 +[ 3.653131] device-mapper: ioctl: 4.27.0-ioctl (2013-10-30) initialised: dm-devel@redhat.com +[ 3.654281] ledtrig-cpu: registered to indicate activity on CPUs +[ 3.655182] TCP: cubic registered +[ 3.655704] NET: Registered protocol family 10 +[ 3.656551] NET: Registered protocol family 17 +[ 3.657183] Key type dns_resolver registered +[ 3.657931] Loading compiled-in X.509 certificates +[ 3.659264] Loaded X.509 cert 'Magrathea: Glacier signing key: 23984ac203784325ccf7b95b51f6c119380eb933' +[ 3.660726] registered taskstats version 1 +[ 3.663211] Key type trusted registered +[ 3.665462] Key type encrypted registered +[ 3.667679] AppArmor: AppArmor sha1 policy hashing enabled +[ 3.668454] IMA: No TPM chip found, activating TPM-bypass! +[ 3.669388] regulator-dummy: disabling +[ 3.669971] Magic number: 15:428:901 +[ 3.670625] clocksource clocksource0: hash matches +[ 3.671311] acpi PNP0501:01: hash matches +[ 3.671953] rtc_cmos 00:00: setting system clock to 2015-01-28 19:51:13 UTC (1422474673) +[ 3.673268] BIOS EDD facility v0.16 2004-Jun-25, 0 devices found +[ 3.674088] EDD information not available. +[ 3.674668] PM: Hibernation image not present or could not be loaded. +[ 3.676577] Freeing unused kernel memory: 1332K (ffffffff81d1f000 - ffffffff81e6c000) +[ 3.678370] Write protecting the kernel read-only data: 12288k +[ 3.681251] Freeing unused kernel memory: 828K (ffff880001731000 - ffff880001800000) +[ 3.684444] Freeing unused kernel memory: 700K (ffff880001b51000 - ffff880001c00000) +[ 3.700162] systemd-udevd[90]: starting version 204 +[ 3.866262] virtio-pci 0000:00:03.0: irq 43 for MSI/MSI-X +[ 3.867187] virtio-pci 0000:00:03.0: irq 44 for MSI/MSI-X +[ 3.867997] virtio-pci 0000:00:03.0: irq 45 for MSI/MSI-X +[ 3.876214] virtio-pci 0000:00:03.0: irq 46 for MSI/MSI-X +[ 3.880005] scsi0 : Virtio SCSI HBA +[ 3.912410] scsi 0:0:1:0: Direct-Access Google PersistentDisk 1 PQ: 0 ANSI: 6 +[ 3.938957] sd 0:0:1:0: Attached scsi generic sg0 type 0 +[ 3.939845] sd 0:0:1:0: [sda] 20971520 512-byte logical blocks: (10.7 GB/10.0 GiB) +[ 3.941149] sd 0:0:1:0: [sda] 4096-byte physical blocks +[ 3.942233] sd 0:0:1:0: [sda] Write Protect is off +[ 3.942988] sd 0:0:1:0: [sda] Mode Sense: 1f 00 00 08 +[ 3.944398] sd 0:0:1:0: [sda] Write cache: enabled, read cache: enabled, doesn't support DPO or FUA +[ 3.961885] sda: sda1 +[ 3.963152] sd 0:0:1:0: [sda] Attached SCSI disk +[ 4.414649] EXT4-fs (sda1): mounted filesystem with ordered data mode. Opts: (null) +[ 5.293574] random: init urandom read with 73 bits of entropy available +[ 6.418187] random: nonblocking pool is initialized +[ 6.692508] EXT4-fs (sda1): re-mounted. 
Opts: errors=remount-ro +[ 7.121847] IPv6: ADDRCONF(NETDEV_UP): eth0: link is not ready +[ 7.681714] systemd-udevd[293]: starting version 204 +[ 8.437234] lp: driver loaded but no devices found +[ 9.164195] piix4_smbus 0000:00:01.3: SMBus base address uninitialized - upgrade BIOS or use force_addr=0xaddr +[ 9.648096] device-mapper: multipath: version 1.6.0 loaded +[ 10.434575] type=1400 audit(1422474680.256:2): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/sbin/dhclient" pid=368 comm="apparmor_parser" +[ 10.437242] type=1400 audit(1422474680.260:3): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/lib/NetworkManager/nm-dhcp-client.action" pid=368 comm="apparmor_parser" +[ 10.439901] type=1400 audit(1422474680.260:4): apparmor="STATUS" operation="profile_load" profile="unconfined" name="/usr/lib/connman/scripts/dhclient-script" pid=368 comm="apparmor_parser" +[ 11.126295] type=1400 audit(1422474680.948:5): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/sbin/dhclient" pid=412 comm="apparmor_parser" +[ 11.129123] type=1400 audit(1422474680.952:6): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/usr/lib/NetworkManager/nm-dhcp-client.action" pid=412 comm="apparmor_parser" +[ 11.132139] type=1400 audit(1422474680.956:7): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/usr/lib/connman/scripts/dhclient-script" pid=412 comm="apparmor_parser" +[ 11.196173] type=1400 audit(1422474681.020:8): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/sbin/dhclient" pid=458 comm="apparmor_parser" +[ 11.198887] type=1400 audit(1422474681.020:9): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/usr/lib/NetworkManager/nm-dhcp-client.action" pid=458 comm="apparmor_parser" +[ 11.201484] type=1400 audit(1422474681.028:10): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/usr/lib/connman/scripts/dhclient-script" pid=458 comm="apparmor_parser" +[ 11.361371] init: udev-fallback-graphics main process (454) terminated with status 1 +[ 11.378437] type=1400 audit(1422474681.200:11): apparmor="STATUS" operation="profile_replace" profile="unconfined" name="/usr/lib/NetworkManager/nm-dhcp-client.action" pid=458 comm="apparmor_parser" +[ 14.366411] init: failsafe main process (491) killed by TERM signal +kateknister@kateknister-test3:~$ tail -f /var/log/syslog +Jan 28 19:51:47 localhost ntpdate[1240]: adjust time server 169.254.169.254 offset -0.383723 sec +Jan 28 19:51:47 localhost ntpd[1312]: ntpd 4.2.6p5@1.2349-o Wed Oct 9 19:08:06 UTC 2013 (1) +Jan 28 19:51:47 localhost ntpd[1313]: proto: precision = 0.449 usec +Jan 28 19:51:47 localhost ntpd[1313]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16 +Jan 28 19:51:47 localhost ntpd[1313]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123 +Jan 28 19:51:47 localhost ntpd[1313]: Listen and drop on 1 v6wildcard :: UDP 123 +Jan 28 19:51:47 localhost ntpd[1313]: Listen normally on 2 lo 127.0.0.1 UDP 123 +Jan 28 19:51:47 localhost ntpd[1313]: Listen normally on 3 eth0 10.240.192.196 UDP 123 +Jan 28 19:51:47 localhost ntpd[1313]: peers refreshed +Jan 28 19:51:47 localhost ntpd[1313]: Listening on routing socket on fd #20 for interface updates +Jan 28 19:58:45 localhost kernel: [ 455.498827] badsysprogram invoked oom-killer: gfp_mask=0x280da, order=0, oom_score_adj=0 +Jan 28 19:58:45 localhost kernel: [ 455.500173] badsysprogram cpuset=/ mems_allowed=0 +Jan 28 
19:58:45 localhost kernel: [ 455.501007] CPU: 0 PID: 1532 Comm: badsysprogram Not tainted 3.13.0-27-generic #50-Ubuntu +Jan 28 19:58:45 localhost kernel: [ 455.502301] Hardware name: Google Google, BIOS Google 01/01/2011 +Jan 28 19:58:45 localhost kernel: [ 455.503298] 0000000000000000 ffff880069715a90 ffffffff817199c4 ffff8800680d8000 +Jan 28 19:58:45 localhost kernel: [ 455.504563] ffff880069715b18 ffffffff817142ff 0000000000000000 0000000000000000 +Jan 28 19:58:45 localhost kernel: [ 455.505779] 0000000000000000 0000000000000000 0000000000000000 0000000000000000 +Jan 28 19:58:45 localhost kernel: [ 455.506971] Call Trace: +Jan 28 19:58:45 localhost kernel: [ 455.507353] [] dump_stack+0x45/0x56 +Jan 28 19:58:45 localhost kernel: [ 455.508289] [] dump_header+0x7f/0x1f1 +Jan 28 19:58:45 localhost kernel: [ 455.509112] [] oom_kill_process+0x1ce/0x330 +Jan 28 19:58:45 localhost kernel: [ 455.510023] [] ? security_capable_noaudit+0x15/0x20 +Jan 28 19:58:45 localhost kernel: [ 455.510994] [] out_of_memory+0x414/0x450 +Jan 28 19:58:45 localhost kernel: [ 455.511820] [] __alloc_pages_nodemask+0xa87/0xb20 +Jan 28 19:58:45 localhost kernel: [ 455.512815] [] alloc_pages_vma+0x9a/0x140 +Jan 28 19:58:45 localhost kernel: [ 455.513647] [] handle_mm_fault+0xb2b/0xf10 +Jan 28 19:58:45 localhost kernel: [ 455.514498] [] __do_page_fault+0x184/0x560 +Jan 28 19:58:45 localhost kernel: [ 455.515415] [] ? sched_clock+0x9/0x10 +Jan 28 19:58:45 localhost kernel: [ 455.516318] [] ? sched_clock_local+0x1d/0x80 +Jan 28 19:58:45 localhost kernel: [ 455.517242] [] ? acct_account_cputime+0x1c/0x20 +Jan 28 19:58:45 localhost kernel: [ 455.518141] [] ? account_user_time+0x8b/0xa0 +Jan 28 19:58:45 localhost kernel: [ 455.519014] [] ? vtime_account_user+0x54/0x60 +Jan 28 19:58:45 localhost kernel: [ 455.519910] [] do_page_fault+0x1a/0x70 +Jan 28 19:58:45 localhost kernel: [ 455.520712] [] page_fault+0x28/0x30 +Jan 28 19:58:45 localhost kernel: [ 455.521498] Mem-Info: +Jan 28 19:58:45 localhost kernel: [ 455.521873] Node 0 DMA per-cpu: +Jan 28 19:58:45 localhost kernel: [ 455.522388] CPU 0: hi: 0, btch: 1 usd: 0 +Jan 28 19:58:45 localhost kernel: [ 455.598342] Node 0 DMA32 per-cpu: +Jan 28 19:58:45 localhost kernel: [ 455.598890] CPU 0: hi: 186, btch: 31 usd: 86 +Jan 28 19:58:45 localhost kernel: [ 455.599687] active_anon:405991 inactive_anon:57 isolated_anon:0 +Jan 28 19:58:45 localhost kernel: [ 455.599687] active_file:35 inactive_file:69 isolated_file:0 +Jan 28 19:58:45 localhost kernel: [ 455.599687] unevictable:0 dirty:0 writeback:0 unstable:0 +Jan 28 19:58:45 localhost kernel: [ 455.599687] free:12929 slab_reclaimable:1635 slab_unreclaimable:1919 +Jan 28 19:58:45 localhost kernel: [ 455.599687] mapped:34 shmem:70 pagetables:1423 bounce:0 +Jan 28 19:58:45 localhost kernel: [ 455.599687] free_cma:0 +Jan 28 19:58:45 localhost kernel: [ 455.604585] Node 0 DMA free:7124kB min:412kB low:512kB high:616kB active_anon:8508kB inactive_anon:4kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15992kB managed:15908kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:4kB slab_reclaimable:16kB slab_unreclaimable:16kB kernel_stack:0kB pagetables:12kB unstable:0kB bounce:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? 
yes +Jan 28 19:58:45 localhost kernel: [ 455.610811] lowmem_reserve[]: 0 1679 1679 1679 +Jan 28 19:58:45 localhost kernel: [ 455.611600] Node 0 DMA32 free:44592kB min:44640kB low:55800kB high:66960kB active_anon:1615456kB inactive_anon:224kB active_file:140kB inactive_file:276kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:1765368kB managed:1722912kB mlocked:0kB dirty:0kB writeback:0kB mapped:136kB shmem:276kB slab_reclaimable:6524kB slab_unreclaimable:7660kB kernel_stack:592kB pagetables:5680kB unstable:0kB bounce:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:819 all_unreclaimable? yes +Jan 28 19:58:45 localhost kernel: [ 455.618372] lowmem_reserve[]: 0 0 0 0 +Jan 28 19:58:45 localhost kernel: [ 455.619041] Node 0 DMA: 5*4kB (UM) 6*8kB (UEM) 7*16kB (UEM) 1*32kB (M) 2*64kB (UE) 3*128kB (UEM) 1*256kB (E) 2*512kB (EM) 3*1024kB (UEM) 1*2048kB (R) 0*4096kB = 7124kB +Jan 28 19:58:45 localhost kernel: [ 455.621861] Node 0 DMA32: 74*4kB (UEM) 125*8kB (UEM) 78*16kB (UEM) 26*32kB (UE) 12*64kB (UEM) 4*128kB (UE) 4*256kB (UE) 2*512kB (E) 11*1024kB (UE) 7*2048kB (UE) 3*4096kB (UR) = 44592kB +Jan 28 19:58:45 localhost kernel: [ 455.625174] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB +Jan 28 19:58:45 localhost kernel: [ 455.626394] 204 total pagecache pages +Jan 28 19:58:45 localhost kernel: [ 455.626954] 0 pages in swap cache +Jan 28 19:58:45 localhost kernel: [ 455.627455] Swap cache stats: add 0, delete 0, find 0/0 +Jan 28 19:58:45 localhost kernel: [ 455.628242] Free swap = 0kB +Jan 28 19:58:45 localhost kernel: [ 455.628686] Total swap = 0kB +Jan 28 19:58:45 localhost kernel: [ 455.629147] 445340 pages RAM +Jan 28 19:58:45 localhost kernel: [ 455.629577] 0 pages HighMem/MovableOnly +Jan 28 19:58:45 localhost kernel: [ 455.630301] 10614 pages reserved +Jan 28 19:58:45 localhost kernel: [ 455.630787] [ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name +Jan 28 19:58:45 localhost kernel: [ 455.631937] [ 273] 0 273 4869 50 13 0 0 upstart-udev-br +Jan 28 19:58:45 localhost kernel: [ 455.633290] [ 293] 0 293 12802 154 28 0 -1000 systemd-udevd +Jan 28 19:58:45 localhost kernel: [ 455.634671] [ 321] 0 321 3819 54 12 0 0 upstart-file-br +Jan 28 19:58:45 localhost kernel: [ 455.636070] [ 326] 102 326 9805 109 24 0 0 dbus-daemon +Jan 28 19:58:45 localhost kernel: [ 455.637373] [ 334] 101 334 63960 94 26 0 0 rsyslogd +Jan 28 19:58:45 localhost kernel: [ 455.638761] [ 343] 0 343 10863 102 26 0 0 systemd-logind +Jan 28 19:58:45 localhost kernel: [ 455.640158] [ 546] 0 546 3815 60 13 0 0 upstart-socket- +Jan 28 19:58:45 localhost kernel: [ 455.641534] [ 710] 0 710 2556 587 8 0 0 dhclient +Jan 28 19:58:45 localhost kernel: [ 455.642834] [ 863] 0 863 3955 48 13 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.644139] [ 865] 0 865 3955 50 13 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.645325] [ 867] 0 867 3955 51 13 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.646621] [ 868] 0 868 3955 51 12 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.647963] [ 870] 0 870 3955 49 13 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.649234] [ 915] 0 915 5914 61 16 0 0 cron +Jan 28 19:58:45 localhost kernel: [ 455.650439] [ 1015] 0 1015 10885 1524 25 0 0 manage_addresse +Jan 28 19:58:45 localhost kernel: [ 455.651817] [ 1028] 0 1028 3955 49 13 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.653091] [ 1033] 0 1033 3197 48 12 0 0 getty +Jan 28 19:58:45 localhost kernel: [ 455.654783] [ 1264] 0 1264 11031 1635 26 0 0 manage_accounts +Jan 28 
19:58:45 localhost kernel: [ 455.656657] [ 1268] 0 1268 15341 180 33 0 -1000 sshd +Jan 28 19:58:45 localhost kernel: [ 455.657865] [ 1313] 104 1313 6804 154 17 0 0 ntpd +Jan 28 19:58:45 localhost kernel: [ 455.659085] [ 1389] 0 1389 25889 255 55 0 0 sshd +Jan 28 19:58:45 localhost kernel: [ 455.660440] [ 1407] 1020 1407 25889 255 52 0 0 sshd +Jan 28 19:58:45 localhost kernel: [ 455.661595] [ 1408] 1020 1408 5711 581 17 0 0 bash +Jan 28 19:58:45 localhost kernel: [ 455.662887] [ 1425] 0 1425 25889 256 53 0 0 sshd +Jan 28 19:58:45 localhost kernel: [ 455.664075] [ 1443] 1020 1443 25889 257 52 0 0 sshd +Jan 28 19:58:45 localhost kernel: [ 455.665330] [ 1444] 1020 1444 5711 581 16 0 0 bash +Jan 28 19:58:45 localhost kernel: [ 455.666450] [ 1476] 1020 1476 1809 25 9 0 0 tail +Jan 28 19:58:45 localhost kernel: [ 455.667682] [ 1532] 1020 1532 410347 398810 788 0 0 badsysprogram +Jan 28 19:58:45 localhost kernel: [ 455.669006] Out of memory: Kill process 1532 (badsysprogram) score 919 or sacrifice child +Jan 28 19:58:45 localhost kernel: [ 455.670291] Killed process 1532 (badsysprogram) total-vm:1641388kB, anon-rss:1595164kB, file-rss:76kB +[ 0.170499] pnp 00:00: Plug and Play ACPI device, IDs PNP0b00 (active) +[ 0.171591] pnp 00:01: Plug and Play ACPI device, IDs PNP0501 (active) +[ 0.172574] pnp 00:02: Plug and Play ACPI device, IDs PNP0501 (active) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/path.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/path.go new file mode 100644 index 00000000000..a7aceee6615 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/path.go @@ -0,0 +1,24 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import "os" + +func FileExists(file string) bool { + if _, err := os.Stat(file); err != nil { + return false + } + return true +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/doc.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/doc.go new file mode 100644 index 00000000000..763a556c553 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/doc.go @@ -0,0 +1,17 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// procfs contains several low level functions to read information from /proc +// filesystem, and also provides some utility functions like JiffiesToDuration. 
+package procfs diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/jiffy.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/jiffy.go new file mode 100644 index 00000000000..b36772a2525 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/procfs/jiffy.go @@ -0,0 +1,33 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +/* +#include <unistd.h> +*/ +import "C" +import "time" + +var userHz uint64 + +func init() { + userHzLong := C.sysconf(C._SC_CLK_TCK) + userHz = uint64(userHzLong) +} + +func JiffiesToDuration(jiffies uint64) time.Duration { + d := jiffies * 1000000000 / userHz + return time.Duration(d) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/fakesysfs/fake.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/fakesysfs/fake.go new file mode 100644 index 00000000000..963fc61eff8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/fakesysfs/fake.go @@ -0,0 +1,114 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fakesysfs + +import ( + "os" + "time" + + "github.com/google/cadvisor/utils/sysfs" +) + +// If we extend sysfs to support more interfaces, it might be worth making this a mock instead of a fake. 
+type FileInfo struct { + EntryName string +} + +func (self *FileInfo) Name() string { + return self.EntryName +} + +func (self *FileInfo) Size() int64 { + return 1234567 +} + +func (self *FileInfo) Mode() os.FileMode { + return 0 +} + +func (self *FileInfo) ModTime() time.Time { + return time.Time{} +} + +func (self *FileInfo) IsDir() bool { + return true +} + +func (self *FileInfo) Sys() interface{} { + return nil +} + +type FakeSysFs struct { + info FileInfo + cache sysfs.CacheInfo +} + +func (self *FakeSysFs) GetBlockDevices() ([]os.FileInfo, error) { + self.info.EntryName = "sda" + return []os.FileInfo{&self.info}, nil +} + +func (self *FakeSysFs) GetBlockDeviceSize(name string) (string, error) { + return "1234567", nil +} + +func (self *FakeSysFs) GetBlockDeviceScheduler(name string) (string, error) { + return "noop deadline [cfq]", nil +} + +func (self *FakeSysFs) GetBlockDeviceNumbers(name string) (string, error) { + return "8:0\n", nil +} + +func (self *FakeSysFs) GetNetworkDevices() ([]os.FileInfo, error) { + return []os.FileInfo{&self.info}, nil +} + +func (self *FakeSysFs) GetNetworkAddress(name string) (string, error) { + return "42:01:02:03:04:f4\n", nil +} + +func (self *FakeSysFs) GetNetworkMtu(name string) (string, error) { + return "1024\n", nil +} + +func (self *FakeSysFs) GetNetworkSpeed(name string) (string, error) { + return "1000\n", nil +} + +func (self *FakeSysFs) GetNetworkStatValue(name string, stat string) (uint64, error) { + return 1024, nil +} + +func (self *FakeSysFs) GetCaches(id int) ([]os.FileInfo, error) { + self.info.EntryName = "index0" + return []os.FileInfo{&self.info}, nil +} + +func (self *FakeSysFs) GetCacheInfo(cpu int, cache string) (sysfs.CacheInfo, error) { + return self.cache, nil +} + +func (self *FakeSysFs) SetCacheInfo(cache sysfs.CacheInfo) { + self.cache = cache +} + +func (self *FakeSysFs) SetEntryName(name string) { + self.info.EntryName = name +} + +func (self *FakeSysFs) GetSystemUUID() (string, error) { + return "1F862619-BA9F-4526-8F85-ECEAF0C97430", nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/sysfs.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/sysfs.go new file mode 100644 index 00000000000..d33b9d0091f --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysfs/sysfs.go @@ -0,0 +1,241 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysfs + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" +) + +const ( + blockDir = "/sys/block" + cacheDir = "/sys/devices/system/cpu/cpu" + netDir = "/sys/class/net" + dmiDir = "/sys/class/dmi" +) + +type CacheInfo struct { + // size in bytes + Size uint64 + // cache type - instruction, data, unified + Type string + // distance from cpus in a multi-level hierarchy + Level int + // number of cpus that can access this cache. + Cpus int +} + +// Abstracts the lowest level calls to sysfs. 
+type SysFs interface { + // Get directory information for available block devices. + GetBlockDevices() ([]os.FileInfo, error) + // Get Size of a given block device. + GetBlockDeviceSize(string) (string, error) + // Get scheduler type for the block device. + GetBlockDeviceScheduler(string) (string, error) + // Get device major:minor number string. + GetBlockDeviceNumbers(string) (string, error) + + GetNetworkDevices() ([]os.FileInfo, error) + GetNetworkAddress(string) (string, error) + GetNetworkMtu(string) (string, error) + GetNetworkSpeed(string) (string, error) + GetNetworkStatValue(dev string, stat string) (uint64, error) + + // Get directory information for available caches accessible to given cpu. + GetCaches(id int) ([]os.FileInfo, error) + // Get information for a cache accessible from the given cpu. + GetCacheInfo(cpu int, cache string) (CacheInfo, error) + + GetSystemUUID() (string, error) +} + +type realSysFs struct{} + +func NewRealSysFs() (SysFs, error) { + return &realSysFs{}, nil +} + +func (self *realSysFs) GetBlockDevices() ([]os.FileInfo, error) { + return ioutil.ReadDir(blockDir) +} + +func (self *realSysFs) GetBlockDeviceNumbers(name string) (string, error) { + dev, err := ioutil.ReadFile(path.Join(blockDir, name, "/dev")) + if err != nil { + return "", err + } + return string(dev), nil +} + +func (self *realSysFs) GetBlockDeviceScheduler(name string) (string, error) { + sched, err := ioutil.ReadFile(path.Join(blockDir, name, "/queue/scheduler")) + if err != nil { + return "", err + } + return string(sched), nil +} + +func (self *realSysFs) GetBlockDeviceSize(name string) (string, error) { + size, err := ioutil.ReadFile(path.Join(blockDir, name, "/size")) + if err != nil { + return "", err + } + return string(size), nil +} + +func (self *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) { + files, err := ioutil.ReadDir(netDir) + if err != nil { + return nil, err + } + + // Filter out non-directory & non-symlink files + var dirs []os.FileInfo + for _, f := range files { + if f.Mode()|os.ModeSymlink != 0 { + f, err = os.Stat(path.Join(netDir, f.Name())) + if err != nil { + continue + } + } + if f.IsDir() { + dirs = append(dirs, f) + } + } + return dirs, nil +} + +func (self *realSysFs) GetNetworkAddress(name string) (string, error) { + address, err := ioutil.ReadFile(path.Join(netDir, name, "/address")) + if err != nil { + return "", err + } + return string(address), nil +} + +func (self *realSysFs) GetNetworkMtu(name string) (string, error) { + mtu, err := ioutil.ReadFile(path.Join(netDir, name, "/mtu")) + if err != nil { + return "", err + } + return string(mtu), nil +} + +func (self *realSysFs) GetNetworkSpeed(name string) (string, error) { + speed, err := ioutil.ReadFile(path.Join(netDir, name, "/speed")) + if err != nil { + return "", err + } + return string(speed), nil +} + +func (self *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) { + statPath := path.Join(netDir, dev, "/statistics", stat) + out, err := ioutil.ReadFile(statPath) + if err != nil { + return 0, fmt.Errorf("failed to read stat from %q for device %q", statPath, dev) + } + var s uint64 + n, err := fmt.Sscanf(string(out), "%d", &s) + if err != nil || n != 1 { + return 0, fmt.Errorf("could not parse value from %q for file %s", string(out), statPath) + } + return s, nil +} + +func (self *realSysFs) GetCaches(id int) ([]os.FileInfo, error) { + cpuPath := fmt.Sprintf("%s%d/cache", cacheDir, id) + return ioutil.ReadDir(cpuPath) +} + +func bitCount(i uint64) (count int) { + for i 
!= 0 { + if i&1 == 1 { + count++ + } + i >>= 1 + } + return +} + +func getCpuCount(cache string) (count int, err error) { + out, err := ioutil.ReadFile(path.Join(cache, "/shared_cpu_map")) + if err != nil { + return 0, err + } + masks := strings.Split(string(out), ",") + for _, mask := range masks { + // convert hex string to uint64 + m, err := strconv.ParseUint(strings.TrimSpace(mask), 16, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse cpu map %q: %v", string(out), err) + } + count += bitCount(m) + } + return +} + +func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) { + cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, id, name) + out, err := ioutil.ReadFile(path.Join(cachePath, "/size")) + if err != nil { + return CacheInfo{}, err + } + var size uint64 + n, err := fmt.Sscanf(string(out), "%dK", &size) + if err != nil || n != 1 { + return CacheInfo{}, err + } + // convert to bytes + size = size * 1024 + out, err = ioutil.ReadFile(path.Join(cachePath, "/level")) + if err != nil { + return CacheInfo{}, err + } + var level int + n, err = fmt.Sscanf(string(out), "%d", &level) + if err != nil || n != 1 { + return CacheInfo{}, err + } + + out, err = ioutil.ReadFile(path.Join(cachePath, "/type")) + if err != nil { + return CacheInfo{}, err + } + cacheType := strings.TrimSpace(string(out)) + cpuCount, err := getCpuCount(cachePath) + if err != nil { + return CacheInfo{}, err + } + return CacheInfo{ + Size: size, + Level: level, + Type: cacheType, + Cpus: cpuCount, + }, nil +} + +func (self *realSysFs) GetSystemUUID() (string, error) { + id, err := ioutil.ReadFile(path.Join(dmiDir, "id", "product_uuid")) + if err != nil { + return "", err + } + return strings.TrimSpace(string(id)), nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo.go new file mode 100644 index 00000000000..9d6a6208d88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysinfo + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils/sysfs" +) + +var schedulerRegExp = regexp.MustCompile(".*\\[(.*)\\].*") + +// Get information about block devices present on the system. +// Uses the passed in system interface to retrieve the low level OS information. +func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) { + disks, err := sysfs.GetBlockDevices() + if err != nil { + return nil, err + } + + diskMap := make(map[string]info.DiskInfo) + for _, disk := range disks { + name := disk.Name() + // Ignore non-disk devices. + // TODO(rjnagal): Maybe just match hd, sd, and dm prefixes. 
+ if strings.HasPrefix(name, "loop") || strings.HasPrefix(name, "ram") || strings.HasPrefix(name, "sr") { + continue + } + disk_info := info.DiskInfo{ + Name: name, + } + dev, err := sysfs.GetBlockDeviceNumbers(name) + if err != nil { + return nil, err + } + n, err := fmt.Sscanf(dev, "%d:%d", &disk_info.Major, &disk_info.Minor) + if err != nil || n != 2 { + return nil, fmt.Errorf("could not parse device numbers from %s for device %s", dev, name) + } + out, err := sysfs.GetBlockDeviceSize(name) + if err != nil { + return nil, err + } + // Remove trailing newline before conversion. + size, err := strconv.ParseUint(strings.TrimSpace(out), 10, 64) + if err != nil { + return nil, err + } + // size is in 512 bytes blocks. + disk_info.Size = size * 512 + + sched, err := sysfs.GetBlockDeviceScheduler(name) + if err != nil { + sched = "none" + } else { + matches := schedulerRegExp.FindSubmatch([]byte(sched)) + if len(matches) < 2 { + sched = "none" + } else { + sched = string(matches[1]) + } + } + disk_info.Scheduler = sched + device := fmt.Sprintf("%d:%d", disk_info.Major, disk_info.Minor) + diskMap[device] = disk_info + } + return diskMap, nil +} + +// Get information about network devices present on the system. +func GetNetworkDevices(sysfs sysfs.SysFs) ([]info.NetInfo, error) { + devs, err := sysfs.GetNetworkDevices() + if err != nil { + return nil, err + } + netDevices := []info.NetInfo{} + for _, dev := range devs { + name := dev.Name() + // Ignore docker, loopback, and veth devices. + ignoredDevices := []string{"lo", "veth", "docker"} + ignored := false + for _, prefix := range ignoredDevices { + if strings.HasPrefix(name, prefix) { + ignored = true + break + } + } + if ignored { + continue + } + address, err := sysfs.GetNetworkAddress(name) + if err != nil { + return nil, err + } + mtuStr, err := sysfs.GetNetworkMtu(name) + if err != nil { + return nil, err + } + var mtu int64 + n, err := fmt.Sscanf(mtuStr, "%d", &mtu) + if err != nil || n != 1 { + return nil, fmt.Errorf("could not parse mtu from %s for device %s", mtuStr, name) + } + netInfo := info.NetInfo{ + Name: name, + MacAddress: strings.TrimSpace(address), + Mtu: mtu, + } + speed, err := sysfs.GetNetworkSpeed(name) + // Some devices don't set speed. + if err == nil { + var s int64 + n, err := fmt.Sscanf(speed, "%d", &s) + if err != nil || n != 1 { + return nil, fmt.Errorf("could not parse speed from %s for device %s", speed, name) + } + netInfo.Speed = s + } + netDevices = append(netDevices, netInfo) + } + return netDevices, nil +} + +func GetCacheInfo(sysFs sysfs.SysFs, id int) ([]sysfs.CacheInfo, error) { + caches, err := sysFs.GetCaches(id) + if err != nil { + return nil, err + } + + info := []sysfs.CacheInfo{} + for _, cache := range caches { + if !strings.HasPrefix(cache.Name(), "index") { + continue + } + cacheInfo, err := sysFs.GetCacheInfo(id, cache.Name()) + if err != nil { + return nil, err + } + info = append(info, cacheInfo) + } + return info, nil +} + +func GetNetworkStats(name string) (info.NetworkStats, error) { + stats := info.NetworkStats{} + // TODO(rjnagal): Take syfs as an argument. 
+ sysFs, err := sysfs.NewRealSysFs() + if err != nil { + return stats, err + } + return getNetworkStats(name, sysFs) +} + +func getNetworkStats(name string, sysFs sysfs.SysFs) (info.NetworkStats, error) { + stats := info.NetworkStats{} + var err error + stats.RxBytes, err = sysFs.GetNetworkStatValue(name, "rx_bytes") + if err != nil { + return stats, err + } + stats.RxPackets, err = sysFs.GetNetworkStatValue(name, "rx_packets") + if err != nil { + return stats, err + } + stats.RxErrors, err = sysFs.GetNetworkStatValue(name, "rx_errors") + if err != nil { + return stats, err + } + stats.RxDropped, err = sysFs.GetNetworkStatValue(name, "rx_dropped") + if err != nil { + return stats, err + } + stats.TxBytes, err = sysFs.GetNetworkStatValue(name, "tx_bytes") + if err != nil { + return stats, err + } + stats.TxPackets, err = sysFs.GetNetworkStatValue(name, "tx_packets") + if err != nil { + return stats, err + } + stats.TxErrors, err = sysFs.GetNetworkStatValue(name, "tx_errors") + if err != nil { + return stats, err + } + stats.TxDropped, err = sysFs.GetNetworkStatValue(name, "tx_dropped") + if err != nil { + return stats, err + } + return stats, nil +} + +func GetSystemUUID(sysFs sysfs.SysFs) (string, error) { + return sysFs.GetSystemUUID() +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo_test.go new file mode 100644 index 00000000000..f46a0c14f9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/sysinfo/sysinfo_test.go @@ -0,0 +1,131 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysinfo + +import ( + "testing" + + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils/sysfs" + "github.com/google/cadvisor/utils/sysfs/fakesysfs" +) + +func TestGetBlockDeviceInfo(t *testing.T) { + fakeSys := fakesysfs.FakeSysFs{} + disks, err := GetBlockDeviceInfo(&fakeSys) + if err != nil { + t.Errorf("expected call to GetBlockDeviceInfo() to succeed. Failed with %s", err) + } + if len(disks) != 1 { + t.Errorf("expected to get one disk entry. Got %d", len(disks)) + } + key := "8:0" + disk, ok := disks[key] + if !ok { + t.Fatalf("expected key 8:0 to exist in the disk map.") + } + if disk.Name != "sda" { + t.Errorf("expected to get disk named sda. Got %q", disk.Name) + } + size := uint64(1234567 * 512) + if disk.Size != size { + t.Errorf("expected to get disk size of %d. Got %d", size, disk.Size) + } + if disk.Scheduler != "cfq" { + t.Errorf("expected to get scheduler type of cfq. Got %q", disk.Scheduler) + } +} + +func TestGetNetworkDevices(t *testing.T) { + fakeSys := fakesysfs.FakeSysFs{} + fakeSys.SetEntryName("eth0") + devs, err := GetNetworkDevices(&fakeSys) + if err != nil { + t.Errorf("expected call to GetNetworkDevices() to succeed. Failed with %s", err) + } + if len(devs) != 1 { + t.Errorf("expected to get one network device. 
Got %d", len(devs)) + } + eth := devs[0] + if eth.Name != "eth0" { + t.Errorf("expected to find device with name eth0. Found name %q", eth.Name) + } + if eth.Mtu != 1024 { + t.Errorf("expected mtu to be set to 1024. Found %d", eth.Mtu) + } + if eth.Speed != 1000 { + t.Errorf("expected device speed to be set to 1000. Found %d", eth.Speed) + } + if eth.MacAddress != "42:01:02:03:04:f4" { + t.Errorf("expected mac address to be '42:01:02:03:04:f4'. Found %q", eth.MacAddress) + } +} + +func TestIgnoredNetworkDevices(t *testing.T) { + fakeSys := fakesysfs.FakeSysFs{} + ignoredDevices := []string{"veth1234", "lo", "docker0"} + for _, name := range ignoredDevices { + fakeSys.SetEntryName(name) + devs, err := GetNetworkDevices(&fakeSys) + if err != nil { + t.Errorf("expected call to GetNetworkDevices() to succeed. Failed with %s", err) + } + if len(devs) != 0 { + t.Errorf("expected dev %s to be ignored, but got info %+v", name, devs) + } + } +} + +func TestGetCacheInfo(t *testing.T) { + fakeSys := &fakesysfs.FakeSysFs{} + cacheInfo := sysfs.CacheInfo{ + Size: 1024, + Type: "Data", + Level: 3, + Cpus: 16, + } + fakeSys.SetCacheInfo(cacheInfo) + caches, err := GetCacheInfo(fakeSys, 0) + if err != nil { + t.Errorf("expected call to GetCacheInfo() to succeed. Failed with %s", err) + } + if len(caches) != 1 { + t.Errorf("expected to get one cache. Got %d", len(caches)) + } + if caches[0] != cacheInfo { + t.Errorf("expected to find cacheinfo %+v. Got %+v", cacheInfo, caches[0]) + } +} + +func TestGetNetworkStats(t *testing.T) { + expected_stats := info.NetworkStats{ + RxBytes: 1024, + RxPackets: 1024, + RxErrors: 1024, + RxDropped: 1024, + TxBytes: 1024, + TxPackets: 1024, + TxErrors: 1024, + TxDropped: 1024, + } + fakeSys := &fakesysfs.FakeSysFs{} + netStats, err := getNetworkStats("eth0", fakeSys) + if err != nil { + t.Errorf("call to getNetworkStats() failed with %s", err) + } + if expected_stats != netStats { + t.Errorf("expected to get stats %+v, got %+v", expected_stats, netStats) + } +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/utils.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/utils.go new file mode 100644 index 00000000000..9458a4c5172 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/utils.go @@ -0,0 +1,29 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import "fmt" + +// Returns a mask of all cores on the machine if the passed-in mask is empty. +func FixCpuMask(mask string, cores int) string { + if mask == "" { + if cores > 1 { + mask = fmt.Sprintf("0-%d", cores-1) + } else { + mask = "0" + } + } + return mask +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go b/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go new file mode 100644 index 00000000000..f77817c4f96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go @@ -0,0 +1,18 @@ +// Copyright 2014 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +// Version of cAdvisor. +const VERSION = "0.10.1"