update containerd to 4ae34cccc5.

Signed-off-by: Mike Brown <brownwm@us.ibm.com>
This commit is contained in:
Mike Brown 2017-06-07 14:39:13 -05:00 committed by Lantao Liu
parent d4f7380f59
commit 67e884e6cf
193 changed files with 39736 additions and 5633 deletions

250
Godeps/Godeps.json generated
View File

@ -27,133 +27,162 @@
"Rev": "e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd" "Rev": "e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd"
}, },
{ {
"ImportPath": "github.com/containerd/containerd", "ImportPath": "github.com/containerd/containerd/api/services/containers",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/content", "ImportPath": "github.com/containerd/containerd/api/services/content",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/api/services/diff",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/execution", "ImportPath": "github.com/containerd/containerd/api/services/execution",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/images", "ImportPath": "github.com/containerd/containerd/api/services/images",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/rootfs", "ImportPath": "github.com/containerd/containerd/api/services/snapshot",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/version", "ImportPath": "github.com/containerd/containerd/api/services/version",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/api/types/container",
"Comment": "v0.2.3-858-g2562aca",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/types/descriptor", "ImportPath": "github.com/containerd/containerd/api/types/descriptor",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/types/mount", "ImportPath": "github.com/containerd/containerd/api/types/mount",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/api/types/task",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/archive", "ImportPath": "github.com/containerd/containerd/archive",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/archive/compression", "ImportPath": "github.com/containerd/containerd/archive/compression",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/containers",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/content", "ImportPath": "github.com/containerd/containerd/content",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/fs", "ImportPath": "github.com/containerd/containerd/fs",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/images", "ImportPath": "github.com/containerd/containerd/images",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/log", "ImportPath": "github.com/containerd/containerd/log",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/metadata",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/mount",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/plugin", "ImportPath": "github.com/containerd/containerd/plugin",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/reference", "ImportPath": "github.com/containerd/containerd/reference",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/remotes", "ImportPath": "github.com/containerd/containerd/remotes",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/remotes/docker", "ImportPath": "github.com/containerd/containerd/remotes/docker",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/rootfs", "ImportPath": "github.com/containerd/containerd/rootfs",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/content", "ImportPath": "github.com/containerd/containerd/services/content",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
},
{
"ImportPath": "github.com/containerd/containerd/services/diff",
"Comment": "v0.2.8-126-g4ae34cc",
"Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/images", "ImportPath": "github.com/containerd/containerd/services/images",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/rootfs", "ImportPath": "github.com/containerd/containerd/services/snapshot",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/snapshot", "ImportPath": "github.com/containerd/containerd/snapshot",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/sys", "ImportPath": "github.com/containerd/containerd/sys",
"Comment": "v0.2.3-858-g2562aca", "Comment": "v0.2.8-126-g4ae34cc",
"Rev": "2562aca1a3c070bc9f571dec44bfb10993c326d6" "Rev": "4ae34cccc5b496c6547ff28dbeed1bde4773fa7a"
}, },
{ {
"ImportPath": "github.com/containerd/continuity/sysx", "ImportPath": "github.com/containerd/continuity/sysx",
"Rev": "6414d06cab9e2fe082ea29ff42aab627e740d00c" "Rev": "86cec1535a968310e7532819f699ff2830ed7463"
},
{
"ImportPath": "github.com/containerd/fifo",
"Rev": "69b99525e472735860a5269b75af1970142b3062"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/libcni", "ImportPath": "github.com/containernetworking/cni/libcni",
@ -247,6 +276,10 @@
"ImportPath": "github.com/golang/protobuf/proto", "ImportPath": "github.com/golang/protobuf/proto",
"Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93" "Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
}, },
{
"ImportPath": "github.com/golang/protobuf/ptypes/any",
"Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
},
{ {
"ImportPath": "github.com/golang/protobuf/ptypes/empty", "ImportPath": "github.com/golang/protobuf/ptypes/empty",
"Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93" "Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
@ -348,41 +381,37 @@
"Comment": "v2.2.6", "Comment": "v2.2.6",
"Rev": "666120de432aea38ab06bd5c818f04f4129882c9" "Rev": "666120de432aea38ab06bd5c818f04f4129882c9"
}, },
{
"ImportPath": "github.com/tonistiigi/fifo",
"Rev": "fe870ccf293940774c2b44e23f6c71fff8f7547d"
},
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/context/ctxhttp", "ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e" "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
}, },
{ {
"ImportPath": "golang.org/x/sync/errgroup", "ImportPath": "golang.org/x/sync/errgroup",
@ -396,65 +425,100 @@
"ImportPath": "golang.org/x/sys/windows", "ImportPath": "golang.org/x/sys/windows",
"Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a" "Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
}, },
{
"ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
},
{
"ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "19e51611da83d6be54ddafce4a4af510cb3e9ea4"
},
{
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
"Rev": "d80a6e20e776b0b17a324d0ba1ab50a39c8e8944"
},
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/health/grpc_health_v1", "ImportPath": "google.golang.org/grpc/health/grpc_health_v1",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/keepalive",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/stats", "ImportPath": "google.golang.org/grpc/stats",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
},
{
"ImportPath": "google.golang.org/grpc/status",
"Comment": "v1.3.0",
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/tap", "ImportPath": "google.golang.org/grpc/tap",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Comment": "v1.0.5", "Comment": "v1.3.0",
"Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8" "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
}, },
{ {
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1", "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1",

View File

@ -1,3 +0,0 @@
/bin/
**/coverage.txt
**/coverage.out

View File

@ -1,40 +0,0 @@
dist: trusty
sudo: required
language: go
go:
- 1.7.x
- 1.8.x
- tip
go_import_path: github.com/containerd/containerd
addons:
apt:
packages:
- btrfs-tools
env:
- TRAVIS_GOOS=windows TRAVIS_CGO_ENABLED=1
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
install:
- if [ "$TRAVIS_GOOS" = "windows" ] ; then sudo apt-get install -y gcc-multilib gcc-mingw-w64; export CC=x86_64-w64-mingw32-gcc ; export CXX=x86_64-w64-mingw32-g++ ; fi
- wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip -O /tmp/protoc-3.1.0-linux-x86_64.zip
- unzip -o -d /tmp/protobuf /tmp/protoc-3.1.0-linux-x86_64.zip
- export PATH=$PATH:/tmp/protobuf/bin/
- go get -u github.com/vbatts/git-validation
script:
- export GOOS=$TRAVIS_GOOS
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
- make fmt
- make vet
- make binaries
- if [ "$GOOS" != "windows" ]; then make coverage ; fi
- if [ "$GOOS" != "windows" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,115 +0,0 @@
# Contributing
Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers and merged when acceptable.
This project is in an early state, making the impact of contributions much
greater than at other stages. In this respect, it is important to consider any
changes or additions for their future impact more so than their current impact.
## Successful Changes
We ask that before contributing, please make the effort to coordinate with the
maintainers of the project before submitting large or high impact PRs. This
will prevent you from doing extra work that may or may not be merged.
PRs that are just submitted without any prior communication will likely be
summarily closed.
While pull requests are the methodology for submitting changes to code, changes
are much more likely to be accepted if they are accompanied by additional
engineering work. While we don't define this explicitly, most of these goals
are accomplished through communication of the design goals and subsequent
solutions. Oftentimes, it helps to first state the problem before presenting
solutions.
Typically, the best methods of accomplishing this are to submit an issue,
stating the problem. This issue can include a problem statement and a
checklist with requirements. If solutions are proposed, alternatives should be
listed and eliminated. Even if the criterion for eliminating a solution is
frivolous, say so.
Larger changes typically work best with design documents, similar to those found
in `design/`. These are focused on providing context to the design at the time
the feature was conceived and can inform future documentation contributions.
## Commit Messages
There are times for one line commit messages and this is not one of them.
Commit messages should follow best practices, including explaining the context
of the problem and how it was solved, including in caveats or follow up changes
required. They should tell the story of the change and provide readers
understanding of what led to it.
If you're lost about what this even means, please see [How to Write a Git
Commit Message](http://chris.beams.io/posts/git-commit/) for a start.
In practice, the best approach to maintaining a nice commit message is to
leverage a `git add -p` and `git commit --amend` to formulate a solid
changeset. This allows one to piece together a change, as information becomes
available.
If you squash a series of commits, don't just submit that. Re-write the commit
message, as if the series of commits was a single stroke of brilliance.
That said, there is no requirement to have a single commit for a PR, as long as
each commit tells the story. For example, if there is a feature that requires a
package, it might make sense to have the package in a separate commit then have
a subsequent commit that uses it.
Remember, you're telling part of the story with the commit message. Don't make
your chapter weird.
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

View File

@ -1,300 +0,0 @@
# containerd project maintainers file
#
# This file describes who runs the containerd project and how.
# This is a living document - if you see something out of date or missing,
# speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
[Rules]
[Rules.maintainers]
title = "What is a maintainer?"
text = """
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:
1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.
Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""
[Rules.adding-maintainers]
title = "How are maintainers added?"
text = """
Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.
Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.
Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed on the maintainers mailing list.
After a candidate has been announced on the maintainers mailing list, the
existing maintainers are given five business days to discuss the candidate,
raise objections and cast their vote. Candidates must be approved by the BDFL
and at least 66% of the current maintainers by adding their vote on the mailing
list. Only maintainers of the repository that the candidate is proposed for are
allowed to vote. The BDFL's vote is mandatory.
If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The candidate becomes a maintainer once the pull request is
merged.
"""
[Rules.adding-sub-projects]
title = "How are sub projects added?"
text = """
Similar to adding maintainers, new sub projects can be added to containerd
GitHub organization as long as they adhere to the CNCF
[charter](https://www.cncf.io/about/charter/) and mission. After a project
proposal has been announced on a public forum (GitHub issue or mailing list),
the existing maintainers are given five business days to discuss the new
project, raise objections and cast their vote. Projects must be approved by at
least 66% of the current maintainers by adding their vote.
If a project is approved, a maintainer will add the project to the containerd
GitHub organization, and make an announcement on a public forum.
"""
[Rules.stepping-down-policy]
title = "Stepping down policy"
text = """
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.
After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
"""
[Rules.inactive-maintainers]
title = "Removal of inactive maintainers"
text = """
Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.
If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.
If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote by the BDFL and at least 66% of
the current maintainers. The BDFL's vote is mandatory. An e-mail is sent to the
mailing list, inviting maintainers of the project to vote. The voting period is
five business days. Issues related to a maintainer's performance should be
discussed with them among the other maintainers so that they are not surprised
by a pull request removing them.
"""
[Rules.bdfl]
title = "The Benevolent dictator for life (BDFL)"
text = """
containerd follows the timeless, highly efficient and totally unfair system
known as [Benevolent dictator for
life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours
truly, Solomon Hykes, in the role of BDFL. This means that all decisions are
made, by default, by Solomon. Since making every decision himself would be
highly un-scalable, in practice decisions are spread across multiple
maintainers.
Ideally, the BDFL role is like the Queen of England: awesome crown, but not an
actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY.
Every other rule can change, perhaps drastically so, but the BDFL will always be
there, preserving the philosophy and principles of the project, and keeping
ultimate authority over its fate. This gives us great flexibility in
experimenting with various governance models, knowing that we can always press
the "reset" button without fear of fragmentation or deadlock. See the US
congress for a counter-example.
BDFL daily routine:
* Is the project governance stuck in a deadlock or irreversibly fragmented?
* If yes: refactor the project governance
* Are there issues or conflicts escalated by maintainers?
* If yes: resolve them
* Go back to polishing that crown.
"""
[Rules.decisions]
title = "How are decisions made?"
text = """
Short answer: EVERYTHING IS A PULL REQUEST.
containerd is an open-source project with an open design philosophy. This means
that the repository is the source of truth for EVERY aspect of the project,
including its philosophy, design, road map, and APIs. *If it's part of the
project, it's in the repo. If it's in the repo, it's part of the project.*
As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.
All decisions affecting containerd, big and small, follow the same 3 steps:
* Step 1: Open a pull request. Anyone can do this.
* Step 2: Discuss the pull request. Anyone can do this.
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.
"""
[Rules.DCO]
title = "Helping contributors with the DCO"
text = """
The [DCO or `Sign your work`](
https://github.com/containerd/containerd/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.
Some containerd contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.
In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull requests's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - i.e., a spelling change.
When you add someone's DCO, please also add your own to keep a log.
"""
[Rules."no direct push"]
title = "I'm a maintainer. Should I make pull requests too?"
text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""
[Rules.meta]
title = "How is this process changed?"
text = "Just like everything else: by making a pull request :)"
# Current project roles
[Roles]
[Roles.bdfl]
person = "shykes"
[Roles."Chief Architect"]
person = "crosbymichael"
text = """
The chief architect is responsible for the overall integrity of the technical
architecture across all subsystems, and the consistency of APIs and UI.
Changes to UI, public APIs and overall architecture must be approved by the
chief architect.
"""
[Roles."Chief Maintainer"]
person = "crosbymichael"
text = """
The chief maintainer is responsible for all aspects of quality for the project
including code reviews, usability, stability, security, performance, etc. The
most important function of the chief maintainer is to lead by example. On the
first day of a new maintainer, the best advice should be "follow the C.M.'s
example and you'll be fine".
"""
# Current project organization
[Org]
[Org.Maintainers]
people = [
"crosbymichael",
"dqminh",
"dmcgowan",
"estesp",
"hqhq",
"jhowardmsft",
"mlaventure",
"stevvooe",
]
[People]
[People.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"
[People.dqminh]
Name = "Daniel, Dao Quang Minh"
Email = "dqminh89@gmail.com"
GitHub = "dqminh"
[People.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[People.estesp]
Name = "Phil Estes"
Email = "estesp@linux.vnet.ibm.com"
GitHub = "estesp"
[People.hqhq]
Name = "Qiang Huang"
Email = "h.huangqiang@huawei.com"
GitHub = "hqhq"
[People.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"
[People.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@docker.com"
GitHub = "mlaventure"
[People.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

View File

@ -1,190 +0,0 @@
# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# Base path used to install.
DESTDIR=/usr/local
# Used to populate variables in version package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
PKG=github.com/containerd/containerd
ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
GOOS ?= $(shell go env GOOS)
else
GOOS ?= $$GOOS
endif
WHALE = "🐳"
ONI = "👹"
ifeq ("$(OS)", "Windows_NT")
WHALE="+"
ONI="-"
endif
# Project packages.
PACKAGES=$(shell go list ./... | grep -v /vendor/)
INTEGRATION_PACKAGE=${PKG}/integration
SNAPSHOT_PACKAGES=$(shell go list ./snapshot/...)
# Project binaries.
COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild
ifeq ("$(GOOS)", "linux")
COMMANDS += containerd-shim
endif
BINARIES=$(addprefix bin/,$(COMMANDS))
ifeq ("$(GOOS)", "windows")
BINARY_SUFFIX=".exe"
endif
GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG)"
# Flags passed to `go test`
TESTFLAGS ?=-parallel 8 -race
.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor
.DEFAULT: default
all: binaries
check: fmt vet lint ineffassign ## run fmt, vet, lint, ineffassign
ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
setup: ## install dependencies
@echo "$(WHALE) $@"
# TODO(stevvooe): Install these from the vendor directory
@go get -u github.com/golang/lint/golint
#@go get -u github.com/kisielk/errcheck
@go get -u github.com/gordonklaus/ineffassign
generate: protos
@echo "$(WHALE) $@"
@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}
protos: bin/protoc-gen-gogoctrd bin/ctrd-protobuild ## generate protobuf
@echo "$(WHALE) $@"
@PATH=${ROOTDIR}/bin:${PATH} ctrd-protobuild ${PACKAGES}
checkprotos: protos ## check if protobufs needs to be generated again
@echo "$(WHALE) $@"
@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
((git diff | cat) && \
(echo "$(ONI) please run 'make generate' when making changes to proto files" && false))
# Depends on binaries because vet will silently fail if it can't load compiled
# imports
vet: binaries ## run go vet
@echo "$(WHALE) $@"
@test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | grep -v 'unrecognized printf verb 'r'' | egrep -v '(timestamp_test.go|duration_test.go|fetch.go|exit status 1)' | tee /dev/stderr)"
fmt: ## run go fmt
@echo "$(WHALE) $@"
@test -z "$$(gofmt -s -l . | grep -v vendor/ | grep -v ".pb.go$$" | tee /dev/stderr)" || \
(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
(echo "$(ONI) please indent proto files with tabs only" && false)
@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -EHn "[_ ]id = " {} \; | grep -v gogoproto.customname | tee /dev/stderr)" || \
(echo "$(ONI) id fields in proto files must have a gogoproto.customname set" && false)
@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)
lint: ## run go lint
@echo "$(WHALE) $@"
@test -z "$$(golint ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
dco: ## dco check
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
ifdef TRAVIS_COMMIT_RANGE
git-validation -q -run DCO,short-subject,dangling-whitespace
else
git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif
ineffassign: ## run ineffassign
@echo "$(WHALE) $@"
@test -z "$$(ineffassign . | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
#errcheck: ## run go errcheck
# @echo "$(WHALE) $@"
# @test -z "$$(errcheck ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
build: ## build the go packages
@echo "$(WHALE) $@"
@go build -i -v ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
test: ## run tests, except integration tests and tests that require root
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
root-test: ## run tests, except integration tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} ${SNAPSHOT_PACKAGES} -test.root
integration: ## run integration tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} ${INTEGRATION_PACKAGE}
FORCE:
# Build a binary from a cmd.
bin/%: cmd/% FORCE
@test $$(go list) = "${PKG}" || \
(echo "$(ONI) Please correctly set up your Go build environment. This project must be located at <GOPATH>/src/${PKG}" && false)
@echo "$(WHALE) $@${BINARY_SUFFIX}"
@go build -i -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_GCFLAGS} ./$<
binaries: $(BINARIES) ## build binaries
@echo "$(WHALE) $@"
clean: ## clean up binaries
@echo "$(WHALE) $@"
@rm -f $(BINARIES)
install: ## install binaries
@echo "$(WHALE) $@ $(BINARIES)"
@mkdir -p $(DESTDIR)/bin
@install $(BINARIES) $(DESTDIR)/bin
uninstall:
@echo "$(WHALE) $@"
@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
coverage: ## generate coverprofiles from the unit tests, except tests that require root
@echo "$(WHALE) $@"
@rm -f coverage.txt
( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}); do \
go test -i ${TESTFLAGS} -test.short -coverprofile=coverage.out -covermode=atomic $$pkg || exit; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi; \
go test ${TESTFLAGS} -test.short -coverprofile=coverage.out -covermode=atomic $$pkg || exit; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi; \
done )
root-coverage: ## generate coverage profiles for the unit tests
@echo "$(WHALE) $@"
@( for pkg in ${SNAPSHOT_PACKAGES}; do \
go test -i ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
go test ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
done )
coverage-integration: ## generate coverprofiles from the integration tests
@echo "$(WHALE) $@"
go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE}
vendor:
@echo "$(WHALE) $@"
@vndr
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort

View File

@ -1,137 +0,0 @@
![banner](/docs/images/containerd-dark.png?raw=true)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
### State of the Project
containerd currently has two active branches.
There is a [v0.2.x](https://github.com/containerd/containerd/tree/v0.2.x) branch for the current release of containerd that is being consumed by Docker and others and the master branch is the development branch for the 1.0 roadmap and feature set.
Any PR or issue that is intended for the current v0.2.x release should be tagged with the same `v0.2.x` tag.
### Communication
For async communication and long running discussions please use issues and pull requests on the github repo.
This will be the best place to discuss design and implementation.
For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
**Slack:** https://dockr.ly/community
### Developer Quick-Start
To build the daemon and `ctr` simple test client, the following build system dependencies are required:
* Go 1.8.x or above (requires 1.8 due to use of golang plugin(s))
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.1.0 release for a 64-bit Linux host:
```
$ wget -c https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip
$ sudo unzip protoc-3.1.0-linux-x86_64.zip -d /usr/local
```
With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
Containerd will by default use `runc` found via the $PATH as the OCI-compliant runtime. You can specify the runtime directly with the `runtime` flag when starting containerd. However you specify the runtime, the expectation is that during the pre-release development cycle for containerd, the supported version of `runc` will track the current master branch of [`opencontainers/runc`](https://github.com/opencontainers/runc).
## Features
* OCI Image Spec support
* OCI Runtime Spec support
* Image push and pull support
* Container runtime and lifecycle support
* Management of network namespaces for containers to join existing namespaces
* Multi-tenant support with CAS storage for global images
## Scope and Principles
Having a clearly defined scope of a project is important for ensuring consistency and focus.
These following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.
### Components
Components should not have tight dependencies on each other so that they are able to be used independently.
The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.
An example for this design can be seen with the overlay filesystems and the container execution layer.
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
### Primitives
containerd should expose primitives to solve problems instead of building high level abstractions in the API.
A common example of this is how build would be implemented.
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allow people implementing build at the higher levels more flexibility.
### Extensibility and Defaults
For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.
containerd will come with a default implementation for the various components.
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
### Releases
containerd will be released with a 1.0 when feature complete and this version will be supported for 1 year with security and bug fixes applied and released.
The upgrade path for containerd is that the 0.0.x patch releases are always backward compatible with its major and minor version.
Minor (0.x.0) version will always be compatible with the previous minor release. i.e. 1.2.0 is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0.
There is no compatibility guarantees with upgrades from two minor releases. i.e. 1.0.0 to 1.2.0.
There are not backwards compatibility guarantees with upgrades to major versions. i.e 1.0.0 to 2.0.0.
Each major version will be supported for 1 year with bug fixes and security patches.
### Scope
The following table specifies the various components of containerd and general features of container runtimes.
The table specifies whether or not the feature/component is in or out of scope.
| Name | Description | In/Out | Reason |
|------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| execution                    | Provide an extensible execution layer for executing a container                                        | in     | Create, start, stop, pause, resume, exec, signal, delete                                                                                                                                                      |
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
| metrics | container-level metrics, cgroup stats, and OOM events | in |
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
| logging | Persisting container logs | out | Logging can be build on top of containerd because the containers STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |
containerd is scoped to a single host and makes assumptions based on that fact.
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purpose, with no mandate to be human-friendly, and no guarantee of interface stability over time.
Also things like service discovery are out of scope even though networking is in scope.
containerd should provide the primitives to create, add, remove, or manage network interfaces and network namespaces for a container but IP allocation, discovery, and DNS should be handled at higher layers.
### How is the scope changed?
The scope of this project is a whitelist.
If it's not mentioned as being in scope, it is out of scope.
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
### Development reports.
Weekly summary on the progress and what is being worked on.
https://github.com/containerd/containerd/tree/master/reports
## Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code
is released under the Apache 2.0 license. The README.md file, and files in the
"docs" folder are licensed under the Creative Commons Attribution 4.0
International License under the terms and conditions set forth in the file
"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

View File

@ -1,78 +0,0 @@
# containerd roadmap
This is a high level roadmap for the project that outlines what is currently being worked on, what comes next, and where you can help.
For a more up to date look please review the milestones on [github](https://github.com/containerd/containerd/milestones).
The following are the different status the various phases of development can be in:
* Not Started - no work or thinking has been done towards the goal
* In Design - design work has started for the component and you can find design documents in the `design` folder
* In Progress - design has mostly finished and development has started
* Completed - the development work has been completed
* Stable - the apis for the phase are feature complete and considered stable
We would like to follow the roadmap and develop the components one by one to completion before starting the next phase. If PRs are opened for another phase before the previous phase has been completed they will be closed as we are not ready for them at that time.
## Phase 1
**Status:** In Progress
### GRPC API
**Documents:**
We are going from a top down design for filling out this missing pieces of containerd and design of the API.
### Design
**Documents:**
The high level design work is needed so that the architecture of containerd stays consistent throughout the development process.
### Build & Test Process
**Documents:**
We need to have a simple build and test process for new developers to bootstrap their environments.
Because containerd will be the base of many high level systems we need to have a simple build process that does
not require high level tooling.
## Phase 2
Phase 2 includes most of the design and development work for the execution and storage layers of containerd.
It will include porting over existing "graph drivers" from Docker Engine and finding a common model for representing snapshots for layered filesystems.
This will also include moving the existing execution code support OCI's Runtime Spec and the existing containerd execution code.
**Status:** In Design
### Runtime
The runtime layer is responsible for the creation of containers and their management, and supervision of the processes inside those containers.
### Storage
**Documents:** https://github.com/containerd/containerd/blob/master/design/snapshots.md
The current graph drivers were built when we only had overlay filesystems like aufs.
We forced the model to be designed around overlay filesystems and this introduced a lot of complexity for snapshotting graph drivers like btrfs and devicemapper thin-p.
Our current approach is to model our storage layer after snapshotting drivers instead of overlay drivers as we can get the same results and its cleaner and more robust to have an overlay filesystem model snapshots than it is to have a snapshot filesystem model overlay filesystems.
## Phase 3
This phase includes getting support for the OCI Image spec built into containerd.
**Status:** Not Started
### Distribution
## Phase 4
Phase 4 involves graduating to version 1.0, and shifting the focus from features to maintenance. Graduating to 1.0 implies:
- Completing all of the above phases.
- Covering the functionalities required by a majority of container-centric platforms.
- Offering feature parity, to the extent of technical possibilities, across Linux and Windows.
- Demonstrating that containerd fulfills the requirements of at least one higher-level platform through its complete integration as an upstream.
**Status:** Not Started

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,116 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
// Containers provides metadata storage for containers used in the execution
// service.
//
// The objects here provide a state-independent view of containers for use in
// management and resource pinning. From that perspective, containers do not
// have a "state" but rather this is the set of resources that will be
// considered in use by the container.
//
// From the perspective of the execution service, these objects represent the
// base parameters for creating a container process.
//
// In general, when looking to add fields for this type, first ask yourself
// whether or not the function of the field has to do with runtime execution or
// is invariant of the runtime state of the container. If it has to do with
// runtime, or changes as the "container" is started and stops, it probably
// doesn't belong on this object.
service Containers {
rpc Get(GetContainerRequest) returns (GetContainerResponse);
rpc List(ListContainersRequest) returns (ListContainersResponse);
rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
}
message Container {
// ID is the user-specified identifier.
//
// This field may not be updated.
string id = 1;
// Labels provides an area to include arbitrary data on containers.
//
// Note that to add a new value to this field, read the existing set and
// include the entire result in the update call.
map<string, string> labels = 2;
// Image contains the reference of the image used to build the
// specification and snapshots for running this container.
//
	// If this field is updated, the spec and rootfs need to be updated, as well.
string image = 3;
// Runtime specifies which runtime to use for executing this container.
string runtime = 4;
// Spec to be used when creating the container. This is runtime specific.
google.protobuf.Any spec = 6;
// RootFS specifies the snapshot key to use for the container's root
// filesystem. When starting a task from this container, a caller should
// look up the mounts from the snapshot service and include those on the
// task create request.
//
// Snapshots referenced in this field will not be garbage collected.
//
// This field may be updated.
string rootfs = 7 [(gogoproto.customname) = "RootFS"];
}
message GetContainerRequest {
string id = 1;
}
message GetContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message ListContainersRequest {
string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
}
message ListContainersResponse {
repeated Container containers = 1 [(gogoproto.nullable) = false];
}
message CreateContainerRequest {
Container container = 1 [(gogoproto.nullable) = false];
}
message CreateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
// UpdateContainerRequest updates the metadata on one or more container.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateContainerRequest {
// Container provides the target values, as declared by the mask, for the update.
//
// The ID field must be set.
Container container = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message DeleteContainerRequest {
string id = 1;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,58 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
// Diff service creates and applies diffs
service Diff {
// Apply applies the content associated with the provided digests onto
// the provided mounts. Archive content will be extracted and
// decompressed if necessary.
rpc Apply(ApplyRequest) returns (ApplyResponse);
// Diff creates a diff between the given mounts and uploads the result
// to the content store.
rpc Diff(DiffRequest) returns (DiffResponse);
}
message ApplyRequest {
// Diff is the descriptor of the diff to be extracted
containerd.v1.types.Descriptor diff = 1;
repeated containerd.v1.types.Mount mounts = 2;
}
message ApplyResponse {
// Applied is the descriptor for the object which was applied.
// If the input was a compressed blob then the result will be
// the descriptor for the uncompressed blob.
containerd.v1.types.Descriptor applied = 1;
}
message DiffRequest {
// Left are the mounts which represent the older copy
// in which is the base of the computed changes.
repeated containerd.v1.types.Mount left = 1;
// Right are the mounts which represents the newer copy
// in which changes from the left were made into.
repeated containerd.v1.types.Mount right = 2;
// MediaType is the media type descriptor for the created diff
// object
string media_type = 3;
// Ref identifies the pre-commit content store object. This
// reference can be used to get the status from the content store.
string ref = 5;
}
message DiffResponse {
// Diff is the descriptor of the diff which can be applied
containerd.v1.types.Descriptor diff = 3;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,19 +1,20 @@
syntax = "proto3"; syntax = "proto3";
package containerd.v1.services; package containerd.v1.services.execution;
import "google/protobuf/empty.proto"; import "google/protobuf/empty.proto";
import "google/protobuf/any.proto"; import "google/protobuf/any.proto";
import "gogoproto/gogo.proto"; import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto"; import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/container/container.proto"; import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
import "google/protobuf/timestamp.proto"; import "google/protobuf/timestamp.proto";
service ContainerService { service Tasks {
rpc Create(CreateRequest) returns (CreateResponse); rpc Create(CreateRequest) returns (CreateResponse);
rpc Start(StartRequest) returns (google.protobuf.Empty); rpc Start(StartRequest) returns (google.protobuf.Empty);
rpc Delete(DeleteRequest) returns (DeleteResponse); rpc Delete(DeleteRequest) returns (DeleteResponse);
rpc Info(InfoRequest) returns (containerd.v1.types.Container); rpc Info(InfoRequest) returns (InfoResponse);
rpc List(ListRequest) returns (ListResponse); rpc List(ListRequest) returns (ListResponse);
rpc Kill(KillRequest) returns (google.protobuf.Empty); rpc Kill(KillRequest) returns (google.protobuf.Empty);
rpc Events(EventsRequest) returns (stream containerd.v1.types.Event); rpc Events(EventsRequest) returns (stream containerd.v1.types.Event);
@ -22,64 +23,102 @@ service ContainerService {
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty); rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
rpc Pause(PauseRequest) returns (google.protobuf.Empty); rpc Pause(PauseRequest) returns (google.protobuf.Empty);
rpc Resume(ResumeRequest) returns (google.protobuf.Empty); rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
rpc Processes(ProcessesRequest) returns (ProcessesResponse);
rpc Checkpoint(CheckpointRequest) returns (CheckpointResponse);
} }
message CreateRequest { message CreateRequest {
string id = 1 [(gogoproto.customname) = "ID"]; // ContainerID specifies the container to use for creating this task.
google.protobuf.Any spec = 2; //
// The spec from the provided container id will be used to create the
// task associated with this container. Only one task can be run at a time
// per container.
//
// This should be created using the Containers service.
string container_id = 2;
// RootFS provides the pre-chroot mounts to perform in the shim before
// executing the container task.
//
// These are for mounts that cannot be performed in the user namespace.
// Typically, these mounts should be resolved from snapshots specified on
// the container object.
repeated containerd.v1.types.Mount rootfs = 3; repeated containerd.v1.types.Mount rootfs = 3;
string runtime = 4;
string stdin = 5; string stdin = 5;
string stdout = 6; string stdout = 6;
string stderr = 7; string stderr = 7;
bool terminal = 8; bool terminal = 8;
types.Descriptor checkpoint = 9;
} }
message CreateResponse { message CreateResponse {
string id = 1 [(gogoproto.customname) = "ID"]; // TODO(stevvooe): We no longer have an id for a task since they are bound
uint32 pid = 2; // to a single container. Although, we should represent each new task with
// an ID so one can differentiate between each instance of a container
// running.
//
// Hence, we are leaving this here and reserving the field number in case
// we need to move in this direction.
// string id = 1;
string container_id = 2;
uint32 pid = 3;
} }
message StartRequest { message StartRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
} }
message DeleteRequest { message DeleteRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
} }
message DeleteResponse { message DeleteResponse {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
uint32 exit_status = 2; uint32 exit_status = 2;
google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
} }
message InfoRequest { message InfoRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
}
message InfoResponse {
types.Task task = 1;
} }
message ListRequest { message ListRequest {
} }
message ListResponse { message ListResponse {
repeated containerd.v1.types.Container containers = 1; repeated containerd.v1.types.Task tasks = 1;
} }
message KillRequest { message KillRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
uint32 signal = 2; uint32 signal = 2;
bool all = 3; oneof pid_or_all {
bool all = 3;
uint32 pid = 4;
}
} }
message EventsRequest { message EventsRequest {
} }
message ExecRequest { message ExecRequest {
string id = 1 [(gogoproto.customname) = "ID"]; // ContainerID specifies the container in which to exec the process.
string container_id = 1;
bool terminal = 2; bool terminal = 2;
string stdin = 3; string stdin = 3;
string stdout = 4; string stdout = 4;
string stderr = 5; string stderr = 5;
// Spec for starting a process in the target container.
//
// For runc, this is a process spec, for example.
google.protobuf.Any spec = 6; google.protobuf.Any spec = 6;
} }
@ -88,21 +127,44 @@ message ExecResponse {
} }
message PtyRequest { message PtyRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
uint32 pid = 2; uint32 pid = 2;
uint32 width = 3; uint32 width = 3;
uint32 height = 4; uint32 height = 4;
} }
message CloseStdinRequest { message CloseStdinRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
uint32 pid = 2; uint32 pid = 2;
} }
message PauseRequest { message PauseRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
} }
message ResumeRequest { message ResumeRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string container_id = 1;
}
message ProcessesRequest {
string container_id = 1;
}
message ProcessesResponse{
repeated containerd.v1.types.Process processes = 1;
}
message CheckpointRequest {
string container_id = 1;
bool allow_tcp = 2;
bool allow_unix_sockets = 3;
bool allow_terminal = 4;
bool file_locks = 5;
repeated string empty_namespaces = 6;
string parent_checkpoint = 7 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
bool exit = 8;
}
message CheckpointResponse {
repeated types.Descriptor descriptors = 1;
} }

View File

@ -50,7 +50,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Image struct { type Image struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Target containerd_v1_types1.Descriptor `protobuf:"bytes,2,opt,name=target" json:"target"` Labels string `protobuf:"bytes,2,opt,name=labels,proto3" json:"labels,omitempty"`
Target containerd_v1_types1.Descriptor `protobuf:"bytes,3,opt,name=target" json:"target"`
} }
func (m *Image) Reset() { *m = Image{} } func (m *Image) Reset() { *m = Image{} }
@ -127,11 +128,11 @@ const _ = grpc.SupportPackageIsVersion4
type ImagesClient interface { type ImagesClient interface {
// Get returns an image by name. // Get returns an image by name.
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
// List returns a list of all images known to containerd.
List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
// Put assigns the name to a given target image based on the provided // Put assigns the name to a given target image based on the provided
// image. // image.
Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
// List returns a list of all images known to containerd.
List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
// Delete deletes the image by name. // Delete deletes the image by name.
Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
} }
@ -153,18 +154,18 @@ func (c *imagesClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.Cal
return out, nil return out, nil
} }
func (c *imagesClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { func (c *imagesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
out := new(google_protobuf1.Empty) out := new(ListResponse)
err := grpc.Invoke(ctx, "/containerd.v1.Images/Put", in, out, c.cc, opts...) err := grpc.Invoke(ctx, "/containerd.v1.Images/List", in, out, c.cc, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return out, nil return out, nil
} }
func (c *imagesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { func (c *imagesClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
out := new(ListResponse) out := new(google_protobuf1.Empty)
err := grpc.Invoke(ctx, "/containerd.v1.Images/List", in, out, c.cc, opts...) err := grpc.Invoke(ctx, "/containerd.v1.Images/Put", in, out, c.cc, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -185,11 +186,11 @@ func (c *imagesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...gr
type ImagesServer interface { type ImagesServer interface {
// Get returns an image by name. // Get returns an image by name.
Get(context.Context, *GetRequest) (*GetResponse, error) Get(context.Context, *GetRequest) (*GetResponse, error)
// List returns a list of all images known to containerd.
List(context.Context, *ListRequest) (*ListResponse, error)
// Put assigns the name to a given target image based on the provided // Put assigns the name to a given target image based on the provided
// image. // image.
Put(context.Context, *PutRequest) (*google_protobuf1.Empty, error) Put(context.Context, *PutRequest) (*google_protobuf1.Empty, error)
// List returns a list of all images known to containerd.
List(context.Context, *ListRequest) (*ListResponse, error)
// Delete deletes the image by name. // Delete deletes the image by name.
Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error) Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error)
} }
@ -216,24 +217,6 @@ func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interfac
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _Images_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PutRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImagesServer).Put(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.v1.Images/Put",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImagesServer).Put(ctx, req.(*PutRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListRequest) in := new(ListRequest)
if err := dec(in); err != nil { if err := dec(in); err != nil {
@ -252,6 +235,24 @@ func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interfa
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _Images_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PutRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImagesServer).Put(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.v1.Images/Put",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImagesServer).Put(ctx, req.(*PutRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteRequest) in := new(DeleteRequest)
if err := dec(in); err != nil { if err := dec(in); err != nil {
@ -278,14 +279,14 @@ var _Images_serviceDesc = grpc.ServiceDesc{
MethodName: "Get", MethodName: "Get",
Handler: _Images_Get_Handler, Handler: _Images_Get_Handler,
}, },
{
MethodName: "Put",
Handler: _Images_Put_Handler,
},
{ {
MethodName: "List", MethodName: "List",
Handler: _Images_List_Handler, Handler: _Images_List_Handler,
}, },
{
MethodName: "Put",
Handler: _Images_Put_Handler,
},
{ {
MethodName: "Delete", MethodName: "Delete",
Handler: _Images_Delete_Handler, Handler: _Images_Delete_Handler,
@ -316,7 +317,13 @@ func (m *Image) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name) i += copy(dAtA[i:], m.Name)
} }
dAtA[i] = 0x12 if len(m.Labels) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintImages(dAtA, i, uint64(len(m.Labels)))
i += copy(dAtA[i:], m.Labels)
}
dAtA[i] = 0x1a
i++ i++
i = encodeVarintImages(dAtA, i, uint64(m.Target.Size())) i = encodeVarintImages(dAtA, i, uint64(m.Target.Size()))
n1, err := m.Target.MarshalTo(dAtA[i:]) n1, err := m.Target.MarshalTo(dAtA[i:])
@ -511,6 +518,10 @@ func (m *Image) Size() (n int) {
if l > 0 { if l > 0 {
n += 1 + l + sovImages(uint64(l)) n += 1 + l + sovImages(uint64(l))
} }
l = len(m.Labels)
if l > 0 {
n += 1 + l + sovImages(uint64(l))
}
l = m.Target.Size() l = m.Target.Size()
n += 1 + l + sovImages(uint64(l)) n += 1 + l + sovImages(uint64(l))
return n return n
@ -591,6 +602,7 @@ func (this *Image) String() string {
} }
s := strings.Join([]string{`&Image{`, s := strings.Join([]string{`&Image{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
`Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "containerd_v1_types1.Descriptor", 1), `&`, ``, 1) + `,`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "containerd_v1_types1.Descriptor", 1), `&`, ``, 1) + `,`,
`}`, `}`,
}, "") }, "")
@ -722,6 +734,35 @@ func (m *Image) Unmarshal(dAtA []byte) error {
m.Name = string(dAtA[iNdEx:postIndex]) m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex iNdEx = postIndex
case 2: case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImages
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthImages
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Labels = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 { if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
} }
@ -1334,32 +1375,32 @@ func init() {
} }
var fileDescriptorImages = []byte{ var fileDescriptorImages = []byte{
// 419 bytes of a gzipped FileDescriptorProto // 430 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4d, 0x6f, 0xd3, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x94, 0x40,
0x10, 0xcd, 0x36, 0xa9, 0x25, 0xc6, 0xe4, 0xb2, 0xaa, 0x90, 0x71, 0x91, 0x6b, 0x99, 0x4b, 0xc5, 0x14, 0xde, 0xe9, 0x6e, 0x49, 0x7c, 0xb8, 0x97, 0x49, 0xd3, 0x20, 0x35, 0x94, 0xe0, 0xa5, 0xf1,
0x61, 0x0d, 0xe6, 0x02, 0x52, 0x29, 0x22, 0x2a, 0x54, 0x48, 0x1c, 0x2a, 0x1f, 0xb9, 0x39, 0xee, 0x30, 0x28, 0x5e, 0x34, 0xa9, 0x35, 0x6e, 0xaa, 0x8d, 0x89, 0x87, 0x86, 0x7f, 0x00, 0xf4, 0x89,
0x60, 0x2c, 0xd5, 0x5e, 0xe3, 0x5d, 0x57, 0xca, 0x0d, 0xfe, 0x5d, 0x8e, 0x1c, 0x39, 0x21, 0xe2, 0x24, 0xc0, 0x20, 0x33, 0x34, 0xe9, 0x4d, 0xff, 0xdd, 0x1e, 0x3d, 0x7a, 0x32, 0x96, 0x5f, 0x62,
0x5f, 0x82, 0xbc, 0xbb, 0xf9, 0x32, 0x01, 0xd2, 0x4b, 0x32, 0xab, 0x79, 0xef, 0xcd, 0x7b, 0xe3, 0x98, 0x99, 0xee, 0x6e, 0x71, 0xd5, 0xf6, 0x02, 0xef, 0xcd, 0xfb, 0xbe, 0xef, 0xbd, 0xef, 0xe5,
0x81, 0x37, 0x59, 0x2e, 0x3f, 0x37, 0x53, 0x96, 0xf2, 0x22, 0x4c, 0x79, 0x29, 0x93, 0xbc, 0xc4, 0xc1, 0xdb, 0xbc, 0x90, 0x9f, 0xbb, 0x94, 0x65, 0xbc, 0x0a, 0x33, 0x5e, 0xcb, 0xa4, 0xa8, 0xb1,
0xfa, 0x7a, 0xb3, 0x4c, 0xaa, 0x3c, 0x14, 0x58, 0xdf, 0xe6, 0x29, 0x8a, 0x30, 0x2f, 0x92, 0x6c, 0xbd, 0xd8, 0x0c, 0x93, 0xa6, 0x08, 0x05, 0xb6, 0x97, 0x45, 0x86, 0x22, 0x2c, 0xaa, 0x24, 0x5f,
0xf5, 0xc7, 0xaa, 0x9a, 0x4b, 0x4e, 0xc7, 0x6b, 0x30, 0xbb, 0x7d, 0xe6, 0x1e, 0x65, 0x3c, 0xe3, 0xfd, 0x58, 0xd3, 0x72, 0xc9, 0xe9, 0x7c, 0x0d, 0x66, 0x97, 0xcf, 0xdd, 0xbd, 0x9c, 0xe7, 0x5c,
0xaa, 0x13, 0x76, 0x95, 0x06, 0xb9, 0xc7, 0x19, 0xe7, 0xd9, 0x0d, 0x86, 0xea, 0x35, 0x6d, 0x3e, 0x55, 0xc2, 0x21, 0xd2, 0x20, 0xf7, 0x20, 0xe7, 0x3c, 0x2f, 0x31, 0x54, 0x59, 0xda, 0x7d, 0x0a,
0x85, 0x58, 0x54, 0x72, 0x66, 0x9a, 0x67, 0x7b, 0x99, 0x90, 0xb3, 0x0a, 0x45, 0x58, 0xf0, 0xa6, 0xb1, 0x6a, 0xe4, 0x95, 0x29, 0x1e, 0xdf, 0x69, 0x08, 0x79, 0xd5, 0xa0, 0x08, 0x2b, 0xde, 0xd5,
0x94, 0xfa, 0xd7, 0xb0, 0xdf, 0xdd, 0x81, 0x7d, 0x8d, 0x22, 0xad, 0xf3, 0x4a, 0xf2, 0x7a, 0xa3, 0x52, 0x7f, 0x0d, 0xfb, 0xfd, 0x3d, 0xd8, 0x17, 0x28, 0xb2, 0xb6, 0x68, 0x24, 0x6f, 0x37, 0x42,
0xd4, 0x3a, 0xc1, 0x47, 0x38, 0x7c, 0xdf, 0xe5, 0xa2, 0x14, 0x46, 0x65, 0x52, 0xa0, 0x43, 0x7c, 0xad, 0x13, 0xb4, 0xb0, 0xfb, 0x61, 0xf0, 0x45, 0x29, 0xcc, 0xea, 0xa4, 0x42, 0x87, 0xf8, 0xe4,
0x72, 0x7a, 0x2f, 0x56, 0x35, 0x7d, 0x05, 0x96, 0x4c, 0xea, 0x0c, 0xa5, 0x73, 0xe0, 0x93, 0x53, 0xe8, 0x41, 0xac, 0x62, 0xba, 0x0f, 0x56, 0x99, 0xa4, 0x58, 0x0a, 0x67, 0x47, 0xbd, 0x9a, 0x8c,
0x3b, 0x3a, 0x61, 0x5b, 0xa9, 0x99, 0x92, 0x67, 0x17, 0x2b, 0xcd, 0xc9, 0x68, 0xfe, 0xf3, 0x64, 0xbe, 0x06, 0x4b, 0x26, 0x6d, 0x8e, 0xd2, 0x99, 0xfa, 0xe4, 0xc8, 0x8e, 0x0e, 0xd9, 0xad, 0x6d,
0x10, 0x1b, 0x52, 0xe0, 0x03, 0x5c, 0xa2, 0x8c, 0xf1, 0x4b, 0x83, 0x42, 0xee, 0x1a, 0x10, 0xbc, 0x30, 0xd5, 0x96, 0x9d, 0xae, 0x7a, 0x2d, 0x66, 0xcb, 0x9f, 0x87, 0x93, 0xd8, 0x90, 0x02, 0x1f,
0x04, 0x5b, 0x21, 0x44, 0xc5, 0x4b, 0x81, 0xf4, 0x09, 0x1c, 0xaa, 0x25, 0x2b, 0x8c, 0x1d, 0x1d, 0xe0, 0x0c, 0x65, 0x8c, 0x5f, 0x3a, 0x14, 0x72, 0x5b, 0xe3, 0xe0, 0x15, 0xd8, 0x0a, 0x21, 0x1a,
0xf5, 0xc6, 0x29, 0xa3, 0xb1, 0x86, 0x04, 0xe7, 0x00, 0x57, 0xcd, 0x4a, 0xfc, 0xe9, 0x1e, 0x4c, 0x5e, 0x0b, 0xa4, 0x4f, 0x61, 0x57, 0x2d, 0x5f, 0x61, 0xec, 0x68, 0x6f, 0xd4, 0x4e, 0x19, 0x88,
0xe3, 0xce, 0xf0, 0xc7, 0x60, 0x7f, 0xc8, 0xc5, 0x52, 0x20, 0x98, 0xc0, 0x7d, 0xfd, 0x34, 0x56, 0x35, 0x24, 0x38, 0x01, 0x38, 0xef, 0x56, 0xe2, 0xcf, 0xee, 0xc0, 0x34, 0xd3, 0x19, 0xfe, 0x1c,
0x22, 0xb0, 0xf4, 0xf7, 0x76, 0x88, 0x3f, 0xfc, 0x8f, 0xa2, 0x41, 0x06, 0x8f, 0x61, 0x7c, 0x81, 0xec, 0x8f, 0x85, 0xb8, 0x11, 0x08, 0x16, 0xf0, 0x50, 0xa7, 0x66, 0x94, 0x08, 0x2c, 0x7d, 0x07,
0x37, 0x28, 0xf1, 0x1f, 0x91, 0xa3, 0x6f, 0x07, 0x60, 0x29, 0xb2, 0xa0, 0x67, 0x30, 0xbc, 0x44, 0x0e, 0xf1, 0xa7, 0xff, 0x51, 0x34, 0xc8, 0xe0, 0x09, 0xcc, 0x4f, 0xb1, 0x44, 0x89, 0xff, 0xb0,
0x49, 0x1f, 0xf6, 0xa4, 0xd7, 0x3b, 0x73, 0xdd, 0x5d, 0x2d, 0xe3, 0xf0, 0x05, 0x0c, 0xaf, 0x9a, 0x1c, 0x7d, 0xdb, 0x01, 0x4b, 0x91, 0x05, 0x3d, 0x86, 0xe9, 0x19, 0x4a, 0xfa, 0x68, 0x24, 0xbd,
0x3f, 0xd9, 0xeb, 0xa5, 0xb8, 0x0f, 0x98, 0xbe, 0x3f, 0xb6, 0xbc, 0x3f, 0xf6, 0xb6, 0xbb, 0x3f, 0xde, 0x99, 0xeb, 0x6e, 0x2b, 0x99, 0x09, 0xdf, 0xc0, 0x6c, 0x98, 0x98, 0x8e, 0x31, 0x1b, 0xae,
0xfa, 0x1a, 0x46, 0x5d, 0x56, 0xda, 0x57, 0xdf, 0xd8, 0x87, 0x7b, 0xbc, 0xb3, 0x67, 0x46, 0x9f, 0xdc, 0x83, 0xad, 0x35, 0x23, 0xf0, 0x12, 0xa6, 0xe7, 0xdd, 0x9f, 0xed, 0xd7, 0x5b, 0x75, 0xf7,
0x83, 0xa5, 0x83, 0xd2, 0x47, 0x3d, 0xd8, 0x56, 0xfe, 0xbf, 0x19, 0x98, 0x38, 0xf3, 0x85, 0x37, 0x99, 0x3e, 0x6c, 0x76, 0x73, 0xd8, 0xec, 0xdd, 0x70, 0xd8, 0xf4, 0x04, 0x2c, 0x6d, 0x94, 0x3e,
0xf8, 0xb1, 0xf0, 0x06, 0x5f, 0x5b, 0x8f, 0xcc, 0x5b, 0x8f, 0x7c, 0x6f, 0x3d, 0xf2, 0xab, 0xf5, 0x1e, 0x91, 0x6f, 0xf9, 0xff, 0x1b, 0x7f, 0xe1, 0x2c, 0xaf, 0xbd, 0xc9, 0x8f, 0x6b, 0x6f, 0xf2,
0xc8, 0xd4, 0x52, 0xc8, 0xe7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x45, 0x0e, 0x92, 0xb1, 0xa2, 0xb5, 0xf7, 0xc8, 0xb2, 0xf7, 0xc8, 0xf7, 0xde, 0x23, 0xbf, 0x7a, 0x8f, 0xa4, 0x96, 0x42, 0xbe,
0x03, 0x00, 0x00, 0xf8, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xe2, 0xcf, 0xa0, 0xba, 0x03, 0x00, 0x00,
} }

View File

@ -22,20 +22,21 @@ service Images {
// Get returns an image by name. // Get returns an image by name.
rpc Get(GetRequest) returns (GetResponse); rpc Get(GetRequest) returns (GetResponse);
// List returns a list of all images known to containerd.
rpc List(ListRequest) returns (ListResponse);
// Put assigns the name to a given target image based on the provided // Put assigns the name to a given target image based on the provided
// image. // image.
rpc Put(PutRequest) returns (google.protobuf.Empty); rpc Put(PutRequest) returns (google.protobuf.Empty);
// List returns a list of all images known to containerd.
rpc List(ListRequest) returns (ListResponse);
// Delete deletes the image by name. // Delete deletes the image by name.
rpc Delete(DeleteRequest) returns (google.protobuf.Empty); rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
} }
message Image { message Image {
string name = 1; string name = 1;
types.Descriptor target = 2 [(gogoproto.nullable) = false]; string labels = 2;
types.Descriptor target = 3 [(gogoproto.nullable) = false];
} }
message GetRequest { message GetRequest {

File diff suppressed because it is too large Load Diff

View File

@ -1,35 +0,0 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
service RootFS {
rpc Unpack(UnpackRequest) returns (UnpackResponse);
rpc Prepare(PrepareRequest) returns (MountResponse);
rpc Mounts(MountsRequest) returns (MountResponse);
}
message UnpackRequest {
repeated containerd.v1.types.Descriptor layers = 1;
}
message UnpackResponse {
string chainid = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false, (gogoproto.customname) = "ChainID"];
}
message PrepareRequest {
string name = 1;
string chain_id = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false, (gogoproto.customname) = "ChainID"];
bool readonly = 3;
}
message MountsRequest {
string name = 1;
}
message MountResponse {
repeated containerd.v1.types.Mount mounts = 1;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,81 @@
syntax = "proto3";
package containerd.v1.snapshot;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
// Snapshot service manages snapshots
service Snapshot {
rpc Prepare(PrepareRequest) returns (MountsResponse);
rpc View(PrepareRequest) returns (MountsResponse);
rpc Mounts(MountsRequest) returns (MountsResponse);
rpc Commit(CommitRequest) returns (google.protobuf.Empty);
rpc Remove(RemoveRequest) returns (google.protobuf.Empty);
rpc Stat(StatRequest) returns (StatResponse);
rpc List(ListRequest) returns (stream ListResponse);
rpc Usage(UsageRequest) returns (UsageResponse);
// "Snapshot" prepares a new set of mounts from existing name
}
message PrepareRequest {
string key = 1;
string parent = 2;
}
message MountsRequest {
string key = 1;
}
message MountsResponse {
repeated containerd.v1.types.Mount mounts = 1;
}
message RemoveRequest {
string key = 1;
}
message CommitRequest {
string name = 1;
string key = 2;
}
message StatRequest {
string key = 1;
}
enum Kind {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "Kind";
ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"];
COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"];
}
message Info {
string name = 1;
string parent = 2;
Kind kind = 3;
bool readonly = 4;
}
message StatResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListRequest{}
message ListResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message UsageRequest {
string key = 1;
}
message UsageResponse {
int64 inodes = 2;
int64 size = 1;
}

View File

@ -3,20 +3,26 @@ syntax = "proto3";
package containerd.v1.types; package containerd.v1.types;
import "gogoproto/gogo.proto"; import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto"; import "google/protobuf/timestamp.proto";
enum Status { enum Status {
UNKNOWN = 0; option (gogoproto.goproto_enum_prefix) = false;
CREATED = 1; option (gogoproto.enum_customname) = "Status";
RUNNING = 2;
STOPPED = 3; UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"];
PAUSED = 4; CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"];
RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
} }
message Container { message Task {
string id = 1 [(gogoproto.customname) = "ID"]; string id = 1; // TODO(stevvooe): For now, this is just the container id.
uint32 pid = 2; string container_id = 2;
Status status = 3; uint32 pid = 3;
Status status = 4;
google.protobuf.Any spec = 5;
} }
message Process { message Process {
@ -28,6 +34,7 @@ message Process {
bool terminal = 6; bool terminal = 6;
uint32 exit_status = 7; uint32 exit_status = 7;
Status status = 8; Status status = 8;
google.protobuf.Any runtime_data = 9;
} }
message User { message User {
@ -37,7 +44,7 @@ message User {
} }
message Event { message Event {
string id = 1 [(gogoproto.customname) = "ID"]; string id = 1;
enum EventType { enum EventType {
EXIT = 0; EXIT = 0;

View File

@ -40,13 +40,7 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser {
r, w := io.Pipe() r, w := io.Pipe()
go func() { go func() {
var err error err := WriteDiff(ctx, w, a, b)
cw := newChangeWriter(w, b)
if err = fs.Changes(ctx, a, b, cw.HandleChange); err != nil {
err = errors.Wrap(err, "failed to create diff tar stream")
} else {
err = cw.Close()
}
if err = w.CloseWithError(err); err != nil { if err = w.CloseWithError(err); err != nil {
log.G(ctx).WithError(err).Debugf("closing tar pipe failed") log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
} }
@ -55,6 +49,22 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser {
return r return r
} }
// WriteDiff writes a tar stream of the computed difference between the
// provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
cw := newChangeWriter(w, b)
err := fs.Changes(ctx, a, b, cw.HandleChange)
if err != nil {
return errors.Wrap(err, "failed to create diff tar stream")
}
return cw.Close()
}
const ( const (
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a // whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer. // filename this means that file has been removed from the base layer.

View File

@ -122,7 +122,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
func getxattr(path, attr string) ([]byte, error) { func getxattr(path, attr string) ([]byte, error) {
b, err := sysx.LGetxattr(path, attr) b, err := sysx.LGetxattr(path, attr)
if err == syscall.ENOTSUP || err == syscall.ENODATA { if err == syscall.ENOTSUP || err == sysx.ENODATA {
return nil, nil return nil, nil
} }
return b, err return b, err

View File

@ -1,3 +0,0 @@
## containerd Community Code of Conduct
containerd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@ -0,0 +1,24 @@
package containers
import "context"
// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
ID string
Labels map[string]string
Image string
Runtime string
Spec []byte
RootFS string
}
type Store interface {
Get(ctx context.Context, id string) (Container, error)
List(ctx context.Context, filter string) ([]Container, error)
Create(ctx context.Context, container Container) (Container, error)
Update(ctx context.Context, container Container) (Container, error)
Delete(ctx context.Context, id string) error
}

View File

@ -41,6 +41,17 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size i
} }
defer cw.Close() defer cw.Close()
return Copy(cw, r, size, expected)
}
// Copy copies data with the expected digest from the reader into the
// provided content store writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(cw Writer, r io.Reader, size int64, expected digest.Digest) error {
ws, err := cw.Status() ws, err := cw.Status()
if err != nil { if err != nil {
return err return err
@ -50,7 +61,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size i
r, err = seekReader(r, ws.Offset, size) r, err = seekReader(r, ws.Offset, size)
if err != nil { if err != nil {
if !isUnseekable(err) { if !isUnseekable(err) {
return errors.Wrapf(err, "unabled to resume write to %v", ref) return errors.Wrapf(err, "unabled to resume write to %v", ws.Ref)
} }
// reader is unseekable, try to move the writer back to the start. // reader is unseekable, try to move the writer back to the start.
@ -69,7 +80,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size i
if err := cw.Commit(size, expected); err != nil { if err := cw.Commit(size, expected); err != nil {
if !IsExists(err) { if !IsExists(err) {
return errors.Wrapf(err, "failed commit on ref %q", ref) return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
} }
} }

View File

@ -83,11 +83,11 @@ func compareSysStat(s1, s2 interface{}) (bool, error) {
func compareCapabilities(p1, p2 string) (bool, error) { func compareCapabilities(p1, p2 string) (bool, error) {
c1, err := sysx.LGetxattr(p1, "security.capability") c1, err := sysx.LGetxattr(p1, "security.capability")
if err != nil && err != syscall.ENODATA { if err != nil && err != sysx.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p1) return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
} }
c2, err := sysx.LGetxattr(p2, "security.capability") c2, err := sysx.LGetxattr(p2, "security.capability")
if err != nil && err != syscall.ENODATA { if err != nil && err != sysx.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p2) return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
} }
return bytes.Equal(c1, c2), nil return bytes.Equal(c1, c2), nil

View File

@ -0,0 +1,87 @@
// +build linux
package fs
import (
"fmt"
"io/ioutil"
"os"
"syscall"
"unsafe"
)
func locateDummyIfEmpty(path string) (string, error) {
children, err := ioutil.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
if err != nil {
return "", err
}
name := dummyFile.Name()
err = dummyFile.Close()
return name, err
}
// SupportsDType returns whether the filesystem mounted on path supports d_type
func SupportsDType(path string) (bool, error) {
// locate dummy so that we have at least one dirent
dummy, err := locateDummyIfEmpty(path)
if err != nil {
return false, err
}
if dummy != "" {
defer os.Remove(dummy)
}
visited := 0
supportsDType := true
fn := func(ent *syscall.Dirent) bool {
visited++
if ent.Type == syscall.DT_UNKNOWN {
supportsDType = false
// stop iteration
return true
}
// continue iteration
return false
}
if err = iterateReadDir(path, fn); err != nil {
return false, err
}
if visited == 0 {
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
}
return supportsDType, nil
}
func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
d, err := os.Open(path)
if err != nil {
return err
}
defer d.Close()
fd := int(d.Fd())
buf := make([]byte, 4096)
for {
nbytes, err := syscall.ReadDirent(fd, buf)
if err != nil {
return err
}
if nbytes == 0 {
break
}
for off := 0; off < nbytes; {
ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
if stop := fn(ent); stop {
return nil
}
off += int(ent.Reclen)
}
}
return nil
}

View File

@ -27,7 +27,7 @@ func diskUsage(roots ...string) (Usage, error) {
} }
stat := fi.Sys().(*syscall.Stat_t) stat := fi.Sys().(*syscall.Stat_t)
inodes[inode{dev: uint64(stat.Dev), ino: stat.Ino}] = struct{}{} inodes[inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}] = struct{}{}
size += fi.Size() size += fi.Size()
return nil return nil
}); err != nil { }); err != nil {

View File

@ -11,7 +11,17 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var SkipDesc = fmt.Errorf("skip descriptor") var (
// SkipDesc is used to skip processing of a descriptor and
// its descendants.
SkipDesc = fmt.Errorf("skip descriptor")
// StopHandler is used to signify that the descriptor
// has been handled and should not be handled further.
// This applies only to a single descriptor in a handler
// chain and does not apply to descendant descriptors.
StopHandler = fmt.Errorf("stop handler")
)
type Handler interface { type Handler interface {
Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
@ -24,12 +34,17 @@ func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subd
} }
// Handlers returns a handler that will run the handlers in sequence. // Handlers returns a handler that will run the handlers in sequence.
//
// A handler may return `StopHandler` to stop calling additional handlers
func Handlers(handlers ...Handler) HandlerFunc { func Handlers(handlers ...Handler) HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
var children []ocispec.Descriptor var children []ocispec.Descriptor
for _, handler := range handlers { for _, handler := range handlers {
ch, err := handler.Handle(ctx, desc) ch, err := handler.Handle(ctx, desc)
if err != nil { if err != nil {
if errors.Cause(err) == StopHandler {
break
}
return nil, err return nil, err
} }

View File

@ -7,7 +7,6 @@ import (
"io/ioutil" "io/ioutil"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/log"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -18,6 +17,13 @@ type Image struct {
Target ocispec.Descriptor Target ocispec.Descriptor
} }
type Store interface {
Put(ctx context.Context, name string, desc ocispec.Descriptor) error
Get(ctx context.Context, name string) (Image, error)
List(ctx context.Context) ([]Image, error)
Delete(ctx context.Context, name string) error
}
// TODO(stevvooe): Many of these functions make strong platform assumptions, // TODO(stevvooe): Many of these functions make strong platform assumptions,
// which are untrue in a lot of cases. More refactoring must be done here to // which are untrue in a lot of cases. More refactoring must be done here to
// make this work in all cases. // make this work in all cases.
@ -66,25 +72,7 @@ func (image *Image) RootFS(ctx context.Context, provider content.Provider) ([]di
if err != nil { if err != nil {
return nil, err return nil, err
} }
return RootFS(ctx, provider, desc)
p, err := content.ReadBlob(ctx, provider, desc.Digest)
if err != nil {
log.G(ctx).Fatal(err)
}
var config ocispec.Image
if err := json.Unmarshal(p, &config); err != nil {
log.G(ctx).Fatal(err)
}
// TODO(stevvooe): Remove this bit when OCI structure uses correct type for
// rootfs.DiffIDs.
var diffIDs []digest.Digest
for _, diffID := range config.RootFS.DiffIDs {
diffIDs = append(diffIDs, digest.Digest(diffID))
}
return diffIDs, nil
} }
// Size returns the total size of an image's packed resources. // Size returns the total size of an image's packed resources.
@ -123,3 +111,28 @@ func (image *Image) Size(ctx context.Context, provider content.Provider) (int64,
}), image.Target) }), image.Target)
} }
// RootFS returns the unpacked diffids that make up an image's rootfs.
//
// These are used to verify that a set of layers unpacked to the expected
// values.
func RootFS(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]digest.Digest, error) {
	// Read the image configuration blob referenced by desc.
	p, err := content.ReadBlob(ctx, provider, desc.Digest)
	if err != nil {
		return nil, err
	}

	var config ocispec.Image
	if err := json.Unmarshal(p, &config); err != nil {
		return nil, err
	}

	// TODO(stevvooe): Remove this bit when OCI structure uses correct type for
	// rootfs.DiffIDs.
	var diffIDs []digest.Digest
	for _, diffID := range config.RootFS.DiffIDs {
		diffIDs = append(diffIDs, digest.Digest(diffID))
	}

	return diffIDs, nil
}

View File

@ -10,4 +10,10 @@ const (
MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json"
MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json"
MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
// Checkpoint/Restore Media Types
MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar"
MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar"
MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+json"
) )

View File

@ -1,215 +0,0 @@
package images
import (
"context"
"encoding/binary"
"fmt"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/log"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
var (
ErrExists = errors.New("images: exists")
ErrNotFound = errors.New("images: not found")
)
type Store interface {
Put(ctx context.Context, name string, desc ocispec.Descriptor) error
Get(ctx context.Context, name string) (Image, error)
List(ctx context.Context) ([]Image, error)
Delete(ctx context.Context, name string) error
}
// IsNotFound returns true if the error is due to a missing image.
func IsNotFound(err error) bool {
return errors.Cause(err) == ErrNotFound
}
func IsExists(err error) bool {
return errors.Cause(err) == ErrExists
}
var (
bucketKeyStorageVersion = []byte("v1")
bucketKeyImages = []byte("images")
bucketKeyDigest = []byte("digest")
bucketKeyMediaType = []byte("mediatype")
bucketKeySize = []byte("size")
)
// TODO(stevvooe): This file comprises the data required to implement the
// "metadata" store. For now, it is bound tightly to the local machine and bolt
// but we can take this and use it to define a service interface.
// InitDB will initialize the database for use. The database must be opened for
// write and the caller must not be holding an open transaction.
func InitDB(db *bolt.DB) error {
log.L.Debug("init db")
return db.Update(func(tx *bolt.Tx) error {
_, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyImages)
return err
})
}
func NewImageStore(tx *bolt.Tx) Store {
return &storage{tx: tx}
}
type storage struct {
tx *bolt.Tx
}
func (s *storage) Get(ctx context.Context, name string) (Image, error) {
var image Image
if err := withImageBucket(s.tx, name, func(bkt *bolt.Bucket) error {
image.Name = name
return readImage(&image, bkt)
}); err != nil {
return Image{}, err
}
return image, nil
}
func (s *storage) Put(ctx context.Context, name string, desc ocispec.Descriptor) error {
return withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
ibkt, err := bkt.CreateBucketIfNotExists([]byte(name))
if err != nil {
return err
}
var (
buf [binary.MaxVarintLen64]byte
sizeEncoded []byte = buf[:]
)
sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, desc.Size)]
if len(sizeEncoded) == 0 {
return fmt.Errorf("failed encoding size = %v", desc.Size)
}
for _, v := range [][2][]byte{
{bucketKeyDigest, []byte(desc.Digest)},
{bucketKeyMediaType, []byte(desc.MediaType)},
{bucketKeySize, sizeEncoded},
} {
if err := ibkt.Put(v[0], v[1]); err != nil {
return err
}
}
return nil
})
}
func (s *storage) List(ctx context.Context) ([]Image, error) {
var images []Image
if err := withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
return bkt.ForEach(func(k, v []byte) error {
var (
image = Image{
Name: string(k),
}
kbkt = bkt.Bucket(k)
)
if err := readImage(&image, kbkt); err != nil {
return err
}
images = append(images, image)
return nil
})
}); err != nil {
return nil, err
}
return images, nil
}
func (s *storage) Delete(ctx context.Context, name string) error {
return withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
return bkt.DeleteBucket([]byte(name))
})
}
func readImage(image *Image, bkt *bolt.Bucket) error {
return bkt.ForEach(func(k, v []byte) error {
if v == nil {
return nil // skip it? a bkt maybe?
}
// TODO(stevvooe): This is why we need to use byte values for
// keys, rather than full arrays.
switch string(k) {
case string(bucketKeyDigest):
image.Target.Digest = digest.Digest(v)
case string(bucketKeyMediaType):
image.Target.MediaType = string(v)
case string(bucketKeySize):
image.Target.Size, _ = binary.Varint(v)
}
return nil
})
}
func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) {
bkt, err := tx.CreateBucketIfNotExists(keys[0])
if err != nil {
return nil, err
}
for _, key := range keys[1:] {
bkt, err = bkt.CreateBucketIfNotExists(key)
if err != nil {
return nil, err
}
}
return bkt, nil
}
func withImagesBucket(tx *bolt.Tx, fn func(bkt *bolt.Bucket) error) error {
bkt := getImagesBucket(tx)
if bkt == nil {
return ErrNotFound
}
return fn(bkt)
}
func withImageBucket(tx *bolt.Tx, name string, fn func(bkt *bolt.Bucket) error) error {
bkt := getImageBucket(tx, name)
if bkt == nil {
return ErrNotFound
}
return fn(bkt)
}
func getImagesBucket(tx *bolt.Tx) *bolt.Bucket {
return getBucket(tx, bucketKeyStorageVersion, bucketKeyImages)
}
func getImageBucket(tx *bolt.Tx, name string) *bolt.Bucket {
return getBucket(tx, bucketKeyStorageVersion, bucketKeyImages, []byte(name))
}
func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
bkt := tx.Bucket(keys[0])
for _, key := range keys[1:] {
if bkt == nil {
break
}
bkt = bkt.Bucket(key)
}
return bkt
}

View File

@ -0,0 +1,107 @@
package metadata
import (
"github.com/boltdb/bolt"
"github.com/containerd/containerd/log"
)
var (
	// bucketKeyStorageVersion is the top-level bucket namespacing all
	// metadata by schema version.
	bucketKeyStorageVersion = []byte("v1")
	// Buckets holding the image and container records, respectively.
	bucketKeyImages     = []byte("images")
	bucketKeyContainers = []byte("containers")

	// Field keys of an image record (see readImage/imageStore.Put).
	bucketKeyDigest    = []byte("digest")
	bucketKeyMediaType = []byte("mediatype")
	bucketKeySize      = []byte("size")

	// Field keys of a container record; labels is a nested bucket of
	// key/value pairs (see readContainer/writeContainer).
	bucketKeyLabels  = []byte("labels")
	bucketKeyImage   = []byte("image")
	bucketKeyRuntime = []byte("runtime")
	bucketKeySpec    = []byte("spec")
	bucketKeyRootFS  = []byte("rootfs")
)
// InitDB will initialize the database for use. The database must be opened for
// write and the caller must not be holding an open transaction.
func InitDB(db *bolt.DB) error {
	log.L.Debug("init db")
	return db.Update(func(tx *bolt.Tx) error {
		// Ensure both top-level record buckets exist under the version bucket.
		for _, path := range [][][]byte{
			{bucketKeyStorageVersion, bucketKeyImages},
			{bucketKeyStorageVersion, bucketKeyContainers},
		} {
			if _, err := createBucketIfNotExists(tx, path...); err != nil {
				return err
			}
		}
		return nil
	})
}
// getBucket walks the nested bucket hierarchy named by keys, returning nil
// as soon as any bucket along the path is missing.
func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
	current := tx.Bucket(keys[0])
	for i := 1; i < len(keys) && current != nil; i++ {
		current = current.Bucket(keys[i])
	}
	return current
}
// createBucketIfNotExists creates the nested bucket hierarchy named by keys,
// returning the innermost bucket. Buckets that already exist are reused.
func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) {
	current, err := tx.CreateBucketIfNotExists(keys[0])
	if err != nil {
		return nil, err
	}
	for _, key := range keys[1:] {
		if current, err = current.CreateBucketIfNotExists(key); err != nil {
			return nil, err
		}
	}
	return current, nil
}
// withImagesBucket invokes fn with the images bucket, or returns ErrNotFound
// when the bucket has not been created yet.
func withImagesBucket(tx *bolt.Tx, fn func(bkt *bolt.Bucket) error) error {
	if bkt := getImagesBucket(tx); bkt != nil {
		return fn(bkt)
	}
	return ErrNotFound
}
// withImageBucket invokes fn with the record bucket for the named image, or
// returns ErrNotFound when no such record exists.
func withImageBucket(tx *bolt.Tx, name string, fn func(bkt *bolt.Bucket) error) error {
	if bkt := getImageBucket(tx, name); bkt != nil {
		return fn(bkt)
	}
	return ErrNotFound
}
// getImagesBucket returns the bucket holding all image records, or nil if it
// has not been created yet.
func getImagesBucket(tx *bolt.Tx) *bolt.Bucket {
	return getBucket(tx, bucketKeyStorageVersion, bucketKeyImages)
}
// getImageBucket returns the record bucket for the named image, or nil if it
// does not exist.
func getImageBucket(tx *bolt.Tx, name string) *bolt.Bucket {
	return getBucket(tx, bucketKeyStorageVersion, bucketKeyImages, []byte(name))
}
// createContainersBucket ensures the containers bucket (nested under the
// storage-version bucket) exists, returning it.
func createContainersBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
	// Delegate to the generic helper used everywhere else in this file; the
	// creation order (version bucket, then containers bucket) is identical
	// to the previous inline implementation.
	return createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyContainers)
}
// getContainersBucket returns the bucket holding all container records, or
// nil if it has not been created yet.
func getContainersBucket(tx *bolt.Tx) *bolt.Bucket {
	return getBucket(tx, bucketKeyStorageVersion, bucketKeyContainers)
}
// getContainerBucket returns the record bucket for the container id, or nil
// if it does not exist.
func getContainerBucket(tx *bolt.Tx, id string) *bolt.Bucket {
	return getBucket(tx, bucketKeyStorageVersion, bucketKeyContainers, []byte(id))
}

View File

@ -0,0 +1,171 @@
package metadata
import (
"context"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/containers"
"github.com/pkg/errors"
)
// containerStore reads and writes container records within a single bolt
// transaction.
type containerStore struct {
	tx *bolt.Tx
}

// NewContainerStore returns a containers.Store backed by the provided bolt
// transaction; every method operates within that transaction.
func NewContainerStore(tx *bolt.Tx) containers.Store {
	return &containerStore{
		tx: tx,
	}
}
// Get returns the stored container record for id. A wrapped ErrNotFound is
// returned when no record bucket exists for that id.
func (s *containerStore) Get(ctx context.Context, id string) (containers.Container, error) {
	bkt := getContainerBucket(s.tx, id)
	if bkt == nil {
		return containers.Container{}, errors.Wrap(ErrNotFound, "bucket does not exist")
	}

	container := containers.Container{ID: id}
	if err := readContainer(&container, bkt); err != nil {
		return containers.Container{}, errors.Wrap(err, "failed to read container")
	}

	return container, nil
}
// List returns all stored container records. The filter argument is
// currently ignored — every container is returned. An empty (non-nil) slice
// is returned when no containers bucket exists yet.
func (s *containerStore) List(ctx context.Context, filter string) ([]containers.Container, error) {
	var (
		m   = []containers.Container{}
		bkt = getContainersBucket(s.tx)
	)
	if bkt == nil {
		return m, nil
	}
	err := bkt.ForEach(func(k, v []byte) error {
		// Each container is stored as a nested bucket keyed by id; skip any
		// plain key/value pairs.
		cbkt := bkt.Bucket(k)
		if cbkt == nil {
			return nil
		}
		container := containers.Container{ID: string(k)}

		if err := readContainer(&container, cbkt); err != nil {
			return errors.Wrap(err, "failed to read container")
		}
		m = append(m, container)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return m, nil
}
// Create persists a new container record and returns it. A wrapped ErrExists
// is returned when a record with the same ID is already stored.
func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
	bkt, err := createContainersBucket(s.tx)
	if err != nil {
		return containers.Container{}, err
	}

	// CreateBucket (not CreateBucketIfNotExists) makes duplicate IDs fail.
	cbkt, err := bkt.CreateBucket([]byte(container.ID))
	if err != nil {
		if err == bolt.ErrBucketExists {
			err = errors.Wrap(ErrExists, "content for id already exists")
		}
		return containers.Container{}, err
	}

	if err := writeContainer(&container, cbkt); err != nil {
		return containers.Container{}, errors.Wrap(err, "failed to write container")
	}

	return container, nil
}
// Update overwrites the stored record for container.ID with the given
// container's fields. It never creates a record: a wrapped ErrNotFound is
// returned when the containers bucket or the record itself is missing.
func (s *containerStore) Update(ctx context.Context, container containers.Container) (containers.Container, error) {
	bkt := getContainersBucket(s.tx)
	if bkt == nil {
		return containers.Container{}, errors.Wrap(ErrNotFound, "no containers")
	}

	cbkt := bkt.Bucket([]byte(container.ID))
	if cbkt == nil {
		return containers.Container{}, errors.Wrap(ErrNotFound, "no content for id")
	}

	if err := writeContainer(&container, cbkt); err != nil {
		return containers.Container{}, errors.Wrap(err, "failed to write container")
	}

	return container, nil
}
// Delete removes the record for id. A wrapped ErrNotFound is returned when
// the containers bucket is missing or no record exists for id.
func (s *containerStore) Delete(ctx context.Context, id string) error {
	bkt := getContainersBucket(s.tx)
	if bkt == nil {
		return errors.Wrap(ErrNotFound, "no containers")
	}

	err := bkt.DeleteBucket([]byte(id))
	if err == bolt.ErrBucketNotFound {
		return errors.Wrap(ErrNotFound, "no content for id")
	}
	return err
}
// readContainer populates container from the key/value pairs and the nested
// labels bucket stored in bkt. Unrecognized keys are ignored.
func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
	return bkt.ForEach(func(k, v []byte) error {
		switch string(k) {
		case string(bucketKeyImage):
			container.Image = string(v)
		case string(bucketKeyRuntime):
			container.Runtime = string(v)
		case string(bucketKeySpec):
			// NOTE(review): v aliases transaction-owned memory; confirm that
			// callers copy Spec if it must outlive the bolt transaction.
			container.Spec = v
		case string(bucketKeyRootFS):
			container.RootFS = string(v)
		case string(bucketKeyLabels):
			// Labels are stored in a nested bucket of key/value pairs.
			lbkt := bkt.Bucket(bucketKeyLabels)
			if lbkt == nil {
				return nil
			}
			container.Labels = map[string]string{}

			if err := lbkt.ForEach(func(k, v []byte) error {
				container.Labels[string(k)] = string(v)
				return nil
			}); err != nil {
				return err
			}
		}
		return nil
	})
}
// writeContainer serializes container into bkt. The label bucket is dropped
// and rebuilt on every write so stale labels are never merged with new ones.
func writeContainer(container *containers.Container, bkt *bolt.Bucket) error {
	for _, v := range [][2][]byte{
		{bucketKeyImage, []byte(container.Image)},
		{bucketKeyRuntime, []byte(container.Runtime)},
		{bucketKeySpec, container.Spec},
		{bucketKeyRootFS, []byte(container.RootFS)},
	} {
		if err := bkt.Put(v[0], v[1]); err != nil {
			return err
		}
	}

	// Remove existing labels to keep from merging
	if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
		if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
			return err
		}
	}

	lbkt, err := bkt.CreateBucket(bucketKeyLabels)
	if err != nil {
		return err
	}

	for k, v := range container.Labels {
		if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,17 @@
package metadata
import "github.com/pkg/errors"
var (
	// ErrExists is returned when a metadata record being created already
	// exists.
	ErrExists = errors.New("metadata: exists")
	// ErrNotFound is returned when a requested metadata record cannot be
	// found.
	ErrNotFound = errors.New("metadata: not found")
)

// IsNotFound returns true if the error is due to a missing metadata record.
func IsNotFound(err error) bool {
	return errors.Cause(err) == ErrNotFound
}

// IsExists returns true if the error is due to an already-existing metadata
// record.
func IsExists(err error) bool {
	return errors.Cause(err) == ErrExists
}

View File

@ -0,0 +1,120 @@
package metadata
import (
"context"
"encoding/binary"
"fmt"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/images"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// imageStore reads and writes image records within a single bolt
// transaction.
type imageStore struct {
	tx *bolt.Tx
}

// NewImageStore returns an images.Store backed by the provided bolt
// transaction; every method operates within that transaction.
func NewImageStore(tx *bolt.Tx) images.Store {
	return &imageStore{tx: tx}
}
// Get returns the image record stored under name. ErrNotFound is returned
// (via withImageBucket) when the record does not exist.
func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error) {
	var image images.Image
	if err := withImageBucket(s.tx, name, func(bkt *bolt.Bucket) error {
		image.Name = name
		return readImage(&image, bkt)
	}); err != nil {
		return images.Image{}, err
	}

	return image, nil
}
// Put creates or replaces the image record for name with the descriptor's
// digest, media type and varint-encoded size. ErrNotFound is returned (via
// withImagesBucket) when the top-level images bucket has not been
// initialized — see InitDB.
func (s *imageStore) Put(ctx context.Context, name string, desc ocispec.Descriptor) error {
	return withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
		ibkt, err := bkt.CreateBucketIfNotExists([]byte(name))
		if err != nil {
			return err
		}

		var (
			buf         [binary.MaxVarintLen64]byte
			sizeEncoded []byte = buf[:]
		)
		sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, desc.Size)]

		// Defensive only: binary.PutVarint always writes at least one byte,
		// so this branch should be unreachable.
		if len(sizeEncoded) == 0 {
			return fmt.Errorf("failed encoding size = %v", desc.Size)
		}

		for _, v := range [][2][]byte{
			{bucketKeyDigest, []byte(desc.Digest)},
			{bucketKeyMediaType, []byte(desc.MediaType)},
			{bucketKeySize, sizeEncoded},
		} {
			if err := ibkt.Put(v[0], v[1]); err != nil {
				return err
			}
		}

		return nil
	})
}
// List returns every image record in the store. ErrNotFound is returned (via
// withImagesBucket) when the images bucket has not been initialized.
func (s *imageStore) List(ctx context.Context) ([]images.Image, error) {
	var m []images.Image
	if err := withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
		return bkt.ForEach(func(k, v []byte) error {
			var (
				image = images.Image{
					Name: string(k),
				}
				// Each record is a nested bucket keyed by image name.
				kbkt = bkt.Bucket(k)
			)

			if err := readImage(&image, kbkt); err != nil {
				return err
			}

			m = append(m, image)
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return m, nil
}
// Delete removes the image record for name, translating bolt's
// ErrBucketNotFound into this package's ErrNotFound.
func (s *imageStore) Delete(ctx context.Context, name string) error {
	return withImagesBucket(s.tx, func(bkt *bolt.Bucket) error {
		err := bkt.DeleteBucket([]byte(name))
		if err == bolt.ErrBucketNotFound {
			return ErrNotFound
		}
		return err
	})
}
// readImage populates image's target descriptor from the key/value pairs in
// bkt. Nested buckets (v == nil) and unrecognized keys are skipped.
func readImage(image *images.Image, bkt *bolt.Bucket) error {
	return bkt.ForEach(func(k, v []byte) error {
		if v == nil {
			return nil // skip it? a bkt maybe?
		}

		// TODO(stevvooe): This is why we need to use byte values for
		// keys, rather than full arrays.
		switch string(k) {
		case string(bucketKeyDigest):
			image.Target.Digest = digest.Digest(v)
		case string(bucketKeyMediaType):
			image.Target.MediaType = string(v)
		case string(bucketKeySize):
			// Size was written varint-encoded by Put; a malformed value
			// leaves Size at 0 since the byte count is discarded.
			image.Target.Size, _ = binary.Varint(v)
		}

		return nil
	})
}

View File

@ -1,4 +1,4 @@
package containerd package mount
// Mount is the lingua franca of containerd. A mount represents a // Mount is the lingua franca of containerd. A mount represents a
// serialized mount syscall. Components either emit or consume mounts. // serialized mount syscall. Components either emit or consume mounts.

View File

@ -1,4 +1,4 @@
package containerd package mount
import ( import (
"strings" "strings"

View File

@ -1,6 +1,6 @@
// +build darwin freebsd // +build darwin freebsd
package containerd package mount
import "github.com/pkg/errors" import "github.com/pkg/errors"

View File

@ -1,4 +1,4 @@
package containerd package mount
import "github.com/pkg/errors" import "github.com/pkg/errors"

View File

@ -0,0 +1,40 @@
package mount
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
//
// The fields mirror, in order, the columns documented for mountinfo in
// proc(5).
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Options represents mount-specific options.
	Options string

	// Optional represents optional fields.
	Optional string

	// FSType indicates the type of filesystem, such as EXT3.
	FSType string

	// Source indicates filesystem specific information or "none".
	Source string

	// VFSOptions represents per super block options.
	VFSOptions string
}

View File

@ -0,0 +1,45 @@
package mount
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
// Self retrieves a list of mounts for the current running process.
//
// It wraps getmntinfo(3): the C-owned statfs array is viewed as a Go slice
// through a hand-built reflect.SliceHeader (no copy), and each entry is
// converted to an Info with only Mountpoint, Source and FSType populated.
func Self() ([]Info, error) {
	var rawEntries *C.struct_statfs

	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
	if count == 0 {
		return nil, fmt.Errorf("Failed to call getmntinfo")
	}

	// Reinterpret the C array as a Go slice without copying; the backing
	// memory is owned by libc (getmntinfo), not the Go heap.
	var entries []C.struct_statfs
	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
	header.Cap = count
	header.Len = count
	header.Data = uintptr(unsafe.Pointer(rawEntries))

	var out []Info
	for _, entry := range entries {
		var mountinfo Info
		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
		mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
		out = append(out, mountinfo)
	}
	return out, nil
}
// PID collects the mounts for a specific process ID.
//
// Not implemented on freebsd; an error is always returned.
func PID(pid int) ([]Info, error) {
	return nil, fmt.Errorf("mountinfo.PID is not implemented on freebsd")
}

View File

@ -0,0 +1,94 @@
// +build linux
package mount
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
const (
	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
	   (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)

	   (1) mount ID: unique identifier of the mount (may be reused after umount)
	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
	   (3) major:minor: value of st_dev for files on filesystem
	   (4) root: root of the mount within the filesystem
	   (5) mount point: mount point relative to the process's root
	   (6) mount options: per mount options
	   (7) optional fields: zero or more fields of the form "tag[:value]"
	   (8) separator: marks the end of the optional fields
	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
	   (10) mount source: filesystem specific information or "none"
	   (11) super options: per super block options*/

	// mountinfoFormat captures fields (1)-(7) plus the first optional-field
	// token; everything after the " - " separator is parsed separately in
	// parseInfoFile.
	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
)
// Self retrieves a list of mounts for the current running process by parsing
// /proc/self/mountinfo. Open errors are returned as-is.
func Self() ([]Info, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f)
}
// parseInfoFile parses the contents of a /proc/<pid>/mountinfo stream,
// returning one Info per line.
//
// Each line is handled in two phases: fmt.Sscanf consumes the fixed fields
// up through the first optional-field token, then the text after the " - "
// separator is split into filesystem type, source and super-block options.
func parseInfoFile(r io.Reader) ([]Info, error) {
	var (
		s   = bufio.NewScanner(r)
		out = []Info{}
	)

	for s.Scan() {
		var (
			p              = Info{}
			text           = s.Text()
			optionalFields string
		)

		if _, err := fmt.Sscanf(text, mountinfoFormat,
			&p.ID, &p.Parent, &p.Major, &p.Minor,
			&p.Root, &p.Mountpoint, &p.Options, &optionalFields); err != nil {
			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
		}

		// Safe as mountinfo encodes mountpoints with spaces as \040.
		index := strings.Index(text, " - ")
		if index < 0 {
			// Previously a missing separator silently mis-sliced the line
			// (index -1 => text[2:]); report it explicitly instead.
			return nil, fmt.Errorf("Error no separator ' - ' found in %q", text)
		}
		postSeparatorFields := strings.Fields(text[index+3:])
		if len(postSeparatorFields) < 3 {
			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
		}

		if optionalFields != "-" {
			p.Optional = optionalFields
		}

		p.FSType = postSeparatorFields[0]
		p.Source = postSeparatorFields[1]
		p.VFSOptions = strings.Join(postSeparatorFields[2:], " ")
		out = append(out, p)
	}

	// Scanner.Err is only meaningful after Scan returns false; the previous
	// in-loop check could never observe a read error.
	if err := s.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
// PID collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `Self` which will inspect
// "/proc/self/mountinfo" instead.
//
// The file is read from /proc/<pid>/mountinfo; open errors are returned
// as-is.
func PID(pid int) ([]Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f)
}

View File

@ -0,0 +1,43 @@
// +build solaris,cgo
package mount
/*
#include <stdio.h>
#include <sys/mnttab.h>
*/
import "C"
import (
"fmt"
)
// Self retrieves a list of mounts for the current running process by reading
// MNTTAB through getmntent(3C). Each entry yields an Info with Mountpoint,
// Source, FSType and Options populated.
//
// NOTE(review): the C strings handed to fopen are never freed — a small leak
// per call; confirm this is acceptable for the call frequency.
func Self() ([]Info, error) {
	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
	if mnttab == nil {
		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
	}

	var out []Info
	var mp C.struct_mnttab

	// getmntent returns 0 while entries remain.
	ret := C.getmntent(mnttab, &mp)
	for ret == 0 {
		var mountinfo Info
		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
		mountinfo.Source = C.GoString(mp.mnt_special)
		mountinfo.FSType = C.GoString(mp.mnt_fstype)
		mountinfo.Options = C.GoString(mp.mnt_mntopts)
		out = append(out, mountinfo)
		ret = C.getmntent(mnttab, &mp)
	}

	C.fclose(mnttab)
	return out, nil
}
// PID collects the mounts for a specific process ID.
//
// Not implemented on solaris; an error is always returned.
func PID(pid int) ([]Info, error) {
	return nil, fmt.Errorf("mountinfo.PID is not implemented on solaris")
}

View File

@ -0,0 +1,18 @@
// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
package mount
import (
"fmt"
"runtime"
)
// Self retrieves a list of mounts for the current running process.
//
// Mount introspection is not implemented for this platform; an error naming
// the current GOOS/GOARCH pair is always returned.
func Self() ([]Info, error) {
	return nil, fmt.Errorf("mountinfo.Self is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

// PID collects the mounts for a specific process ID.
//
// Not implemented for this platform; an error naming the current GOOS/GOARCH
// pair is always returned.
func PID(pid int) ([]Info, error) {
	return nil, fmt.Errorf("mountinfo.PID is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

View File

@ -1,15 +1,17 @@
package containerd package plugin
import "golang.org/x/net/context" import "context"
type ContainerInfo struct { type TaskInfo struct {
ID string ID string
Runtime string ContainerID string
Runtime string
Spec []byte
} }
type Container interface { type Task interface {
// Information of the container // Information of the container
Info() ContainerInfo Info() TaskInfo
// Start the container's user defined process // Start the container's user defined process
Start(context.Context) error Start(context.Context) error
// State returns the container's state // State returns the container's state
@ -19,17 +21,27 @@ type Container interface {
// Resume unpauses the container process // Resume unpauses the container process
Resume(context.Context) error Resume(context.Context) error
// Kill signals a container // Kill signals a container
Kill(context.Context, uint32, bool) error Kill(context.Context, uint32, uint32, bool) error
// Exec adds a process into the container // Exec adds a process into the container
Exec(context.Context, ExecOpts) (Process, error) Exec(context.Context, ExecOpts) (Process, error)
// Processes returns all pids for the container
Processes(context.Context) ([]uint32, error)
// Pty resizes the processes pty/console // Pty resizes the processes pty/console
Pty(context.Context, uint32, ConsoleSize) error Pty(context.Context, uint32, ConsoleSize) error
// CloseStdin closes the processes stdin // CloseStdin closes the processes stdin
CloseStdin(context.Context, uint32) error CloseStdin(context.Context, uint32) error
// Checkpoint checkpoints a container to an image with live system data
Checkpoint(context.Context, CheckpointOpts) error
} }
type LinuxContainer interface { type CheckpointOpts struct {
Container Exit bool
AllowTCP bool
AllowUnixSockets bool
AllowTerminal bool
FileLocks bool
EmptyNamespaces []string
Path string
} }
type ExecOpts struct { type ExecOpts struct {

View File

@ -1,4 +1,4 @@
package containerd package plugin
import "errors" import "errors"

View File

@ -1,4 +1,4 @@
package containerd package plugin
import "time" import "time"

View File

@ -1,46 +1,44 @@
package plugin package plugin
import "github.com/containerd/containerd" // TaskMonitor provides an interface for monitoring of containers within containerd
type TaskMonitor interface {
// ContainerMonitor provides an interface for monitoring of containers within containerd
type ContainerMonitor interface {
// Monitor adds the provided container to the monitor // Monitor adds the provided container to the monitor
Monitor(containerd.Container) error Monitor(Task) error
// Stop stops and removes the provided container from the monitor // Stop stops and removes the provided container from the monitor
Stop(containerd.Container) error Stop(Task) error
// Events emits events from the monitor // Events emits events from the monitor
Events(chan<- *containerd.Event) Events(chan<- *Event)
} }
func NewMultiContainerMonitor(monitors ...ContainerMonitor) ContainerMonitor { func NewMultiTaskMonitor(monitors ...TaskMonitor) TaskMonitor {
return &multiContainerMonitor{ return &multiTaskMonitor{
monitors: monitors, monitors: monitors,
} }
} }
func NewNoopMonitor() ContainerMonitor { func NewNoopMonitor() TaskMonitor {
return &noopContainerMonitor{} return &noopTaskMonitor{}
} }
type noopContainerMonitor struct { type noopTaskMonitor struct {
} }
func (mm *noopContainerMonitor) Monitor(c containerd.Container) error { func (mm *noopTaskMonitor) Monitor(c Task) error {
return nil return nil
} }
func (mm *noopContainerMonitor) Stop(c containerd.Container) error { func (mm *noopTaskMonitor) Stop(c Task) error {
return nil return nil
} }
func (mm *noopContainerMonitor) Events(events chan<- *containerd.Event) { func (mm *noopTaskMonitor) Events(events chan<- *Event) {
} }
type multiContainerMonitor struct { type multiTaskMonitor struct {
monitors []ContainerMonitor monitors []TaskMonitor
} }
func (mm *multiContainerMonitor) Monitor(c containerd.Container) error { func (mm *multiTaskMonitor) Monitor(c Task) error {
for _, m := range mm.monitors { for _, m := range mm.monitors {
if err := m.Monitor(c); err != nil { if err := m.Monitor(c); err != nil {
return err return err
@ -49,7 +47,7 @@ func (mm *multiContainerMonitor) Monitor(c containerd.Container) error {
return nil return nil
} }
func (mm *multiContainerMonitor) Stop(c containerd.Container) error { func (mm *multiTaskMonitor) Stop(c Task) error {
for _, m := range mm.monitors { for _, m := range mm.monitors {
if err := m.Stop(c); err != nil { if err := m.Stop(c); err != nil {
return err return err
@ -58,7 +56,7 @@ func (mm *multiContainerMonitor) Stop(c containerd.Container) error {
return nil return nil
} }
func (mm *multiContainerMonitor) Events(events chan<- *containerd.Event) { func (mm *multiTaskMonitor) Events(events chan<- *Event) {
for _, m := range mm.monitors { for _, m := range mm.monitors {
m.Events(events) m.Events(events)
} }

View File

@ -5,7 +5,6 @@ import (
"sync" "sync"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/snapshot" "github.com/containerd/containerd/snapshot"
@ -19,7 +18,7 @@ const (
RuntimePlugin PluginType = iota + 1 RuntimePlugin PluginType = iota + 1
GRPCPlugin GRPCPlugin
SnapshotPlugin SnapshotPlugin
ContainerMonitorPlugin TaskMonitorPlugin
) )
type Registration struct { type Registration struct {
@ -28,17 +27,17 @@ type Registration struct {
Init func(*InitContext) (interface{}, error) Init func(*InitContext) (interface{}, error)
} }
// TODO(@crosbymichael): how to we keep this struct from growing but support dependency injection for loaded plugins? // TODO(@crosbymichael): how do we keep this struct from growing but support dependency injection for loaded plugins?
type InitContext struct { type InitContext struct {
Root string Root string
State string State string
Runtimes map[string]containerd.Runtime Runtimes map[string]Runtime
Content content.Store Content content.Store
Meta *bolt.DB Meta *bolt.DB
Snapshotter snapshot.Snapshotter Snapshotter snapshot.Snapshotter
Config interface{} Config interface{}
Context context.Context Context context.Context
Monitor ContainerMonitor Monitor TaskMonitor
} }
type Service interface { type Service interface {

View File

@ -1,9 +1,10 @@
package containerd package plugin
import ( import (
"context"
"time" "time"
"golang.org/x/net/context" "github.com/containerd/containerd/mount"
) )
type IO struct { type IO struct {
@ -17,9 +18,10 @@ type CreateOpts struct {
// Spec is the OCI runtime spec // Spec is the OCI runtime spec
Spec []byte Spec []byte
// Rootfs mounts to perform to gain access to the container's filesystem // Rootfs mounts to perform to gain access to the container's filesystem
Rootfs []Mount Rootfs []mount.Mount
// IO for the container's main process // IO for the container's main process
IO IO IO IO
Checkpoint string
} }
type Exit struct { type Exit struct {
@ -31,11 +33,11 @@ type Exit struct {
// arch, or custom usage. // arch, or custom usage.
type Runtime interface { type Runtime interface {
// Create creates a container with the provided id and options // Create creates a container with the provided id and options
Create(ctx context.Context, id string, opts CreateOpts) (Container, error) Create(ctx context.Context, id string, opts CreateOpts) (Task, error)
// Containers returns all the current containers for the runtime // Containers returns all the current containers for the runtime
Containers(context.Context) ([]Container, error) Tasks(context.Context) ([]Task, error)
// Delete removes the container in the runtime // Delete removes the container in the runtime
Delete(context.Context, Container) (*Exit, error) Delete(context.Context, Task) (*Exit, error)
// Events returns events for the runtime and all containers created by the runtime // Events returns events for the runtime and all containers created by the runtime
Events(context.Context) <-chan *Event Events(context.Context) <-chan *Event
} }

View File

@ -79,20 +79,20 @@ func Parse(s string) (Spec, error) {
return Spec{}, ErrHostnameRequired return Spec{}, ErrHostnameRequired
} }
parts := splitRe.Split(u.Path, 2) var object string
if len(parts) < 2 {
return Spec{}, ErrObjectRequired
}
// This allows us to retain the @ to signify digests or shortend digests in if idx := splitRe.FindStringIndex(u.Path); idx != nil {
// the object. // This allows us to retain the @ to signify digests or shortend digests in
object := u.Path[len(parts[0]):] // the object.
if object[:1] == ":" { object = u.Path[idx[0]:]
object = object[1:] if object[:1] == ":" {
object = object[1:]
}
u.Path = u.Path[:idx[0]]
} }
return Spec{ return Spec{
Locator: path.Join(u.Host, parts[0]), Locator: path.Join(u.Host, u.Path),
Object: object, Object: object,
}, nil }, nil
} }
@ -119,6 +119,9 @@ func (r Spec) Digest() digest.Digest {
// String returns the normalized string for the ref. // String returns the normalized string for the ref.
func (r Spec) String() string { func (r Spec) String() string {
if r.Object == "" {
return r.Locator
}
if r.Object[:1] == "@" { if r.Object[:1] == "@" {
return fmt.Sprintf("%v%v", r.Locator, r.Object) return fmt.Sprintf("%v%v", r.Locator, r.Object)
} }

View File

@ -0,0 +1,78 @@
package docker
import (
"context"
"io"
"net/http"
"path"
"strings"
"github.com/Sirupsen/logrus"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type dockerFetcher struct {
*dockerBase
}
func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"base": r.base.String(),
"digest": desc.Digest,
},
))
paths, err := getV2URLPaths(desc)
if err != nil {
return nil, err
}
for _, path := range paths {
u := r.url(path)
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
resp, err := r.doRequestWithRetries(ctx, req, nil)
if err != nil {
return nil, err
}
if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound {
continue // try one of the other urls.
}
resp.Body.Close()
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
return resp.Body, nil
}
return nil, errors.New("not found")
}
// getV2URLPaths generates the candidate urls paths for the object based on the
// set of hints and the provided object id. URLs are returned in the order of
// most to least likely succeed.
func getV2URLPaths(desc ocispec.Descriptor) ([]string, error) {
var urls []string
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
urls = append(urls, path.Join("manifests", desc.Digest.String()))
}
// always fallback to attempting to get the object out of the blobs store.
urls = append(urls, path.Join("blobs", desc.Digest.String()))
return urls, nil
}

View File

@ -0,0 +1,144 @@
package docker
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type dockerPusher struct {
*dockerBase
tag string
}
func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
var (
isManifest bool
existCheck string
)
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
isManifest = true
existCheck = path.Join("manifests", desc.Digest.String())
default:
existCheck = path.Join("blobs", desc.Digest.String())
}
req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil)
if err != nil {
return err
}
req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
resp, err := p.doRequestWithRetries(ctx, req, nil)
if err != nil {
if errors.Cause(err) != ErrInvalidAuthorization {
return err
}
log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
} else {
if resp.StatusCode == http.StatusOK {
return nil
}
if resp.StatusCode != http.StatusNotFound {
// TODO: log error
return errors.Errorf("unexpected response: %s", resp.Status)
}
}
// TODO: Lookup related objects for cross repository push
if isManifest {
// Read all to use bytes.Reader for using GetBody
b, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "failed to read manifest")
}
var putPath string
if p.tag != "" {
putPath = path.Join("manifests", p.tag)
} else {
putPath = path.Join("manifests", desc.Digest.String())
}
req, err := http.NewRequest(http.MethodPut, p.url(putPath), nil)
if err != nil {
return err
}
req.ContentLength = int64(len(b))
req.Body = ioutil.NopCloser(bytes.NewReader(b))
req.GetBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(b)), nil
}
req.Header.Add("Content-Type", desc.MediaType)
resp, err := p.doRequestWithRetries(ctx, req, nil)
if err != nil {
return err
}
if resp.StatusCode != http.StatusCreated {
// TODO: log error
return errors.Errorf("unexpected response: %s", resp.Status)
}
} else {
// TODO: Do monolithic upload if size is small
// TODO: Turn multi-request blob uploader into ingester
// Start upload request
req, err := http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil)
if err != nil {
return err
}
resp, err := p.doRequestWithRetries(ctx, req, nil)
if err != nil {
return err
}
if resp.StatusCode != http.StatusAccepted {
// TODO: log error
return errors.Errorf("unexpected response: %s", resp.Status)
}
location := resp.Header.Get("Location")
// Support paths without host in location
if strings.HasPrefix(location, "/") {
u := p.base
u.Path = location
location = u.String()
}
// TODO: Support chunked upload
req, err = http.NewRequest(http.MethodPut, location, r)
if err != nil {
return err
}
q := req.URL.Query()
q.Add("digest", desc.Digest.String())
req.URL.RawQuery = q.Encode()
req.ContentLength = desc.Size
resp, err = p.doRequest(ctx, req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusCreated {
// TODO: log error
return errors.Errorf("unexpected response: %s", resp.Status)
}
}
return nil
}

View File

@ -4,7 +4,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/textproto" "net/textproto"
@ -66,13 +65,169 @@ func NewResolver(options ResolverOptions) remotes.Resolver {
var _ remotes.Resolver = &dockerResolver{} var _ remotes.Resolver = &dockerResolver{}
func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, remotes.Fetcher, error) { func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
refspec, err := reference.Parse(ref) refspec, err := reference.Parse(ref)
if err != nil { if err != nil {
return "", ocispec.Descriptor{}, nil, err return "", ocispec.Descriptor{}, err
}
if refspec.Object == "" {
return "", ocispec.Descriptor{}, reference.ErrObjectRequired
}
base, err := r.base(refspec)
if err != nil {
return "", ocispec.Descriptor{}, err
}
fetcher := dockerFetcher{
dockerBase: base,
} }
var ( var (
urls []string
dgst = refspec.Digest()
)
if dgst != "" {
if err := dgst.Validate(); err != nil {
// need to fail here, since we can't actually resolve the invalid
// digest.
return "", ocispec.Descriptor{}, err
}
// turns out, we have a valid digest, make a url.
urls = append(urls, fetcher.url("manifests", dgst.String()))
} else {
urls = append(urls, fetcher.url("manifests", refspec.Object))
}
// fallback to blobs on not found.
urls = append(urls, fetcher.url("blobs", dgst.String()))
for _, u := range urls {
req, err := http.NewRequest(http.MethodHead, u, nil)
if err != nil {
return "", ocispec.Descriptor{}, err
}
// set headers for all the types we support for resolution.
req.Header.Set("Accept", strings.Join([]string{
images.MediaTypeDockerSchema2Manifest,
images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest,
ocispec.MediaTypeImageIndex, "*"}, ", "))
log.G(ctx).Debug("resolving")
resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
if err != nil {
return "", ocispec.Descriptor{}, err
}
resp.Body.Close() // don't care about body contents.
if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound {
continue
}
return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
// this is the only point at which we trust the registry. we use the
// content headers to assemble a descriptor for the name. when this becomes
// more robust, we mostly get this information from a secure trust store.
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader != "" {
if err := dgstHeader.Validate(); err != nil {
return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
}
dgst = dgstHeader
}
if dgst == "" {
return "", ocispec.Descriptor{}, errors.Errorf("could not resolve digest for %v", ref)
}
var (
size int64
sizeHeader = resp.Header.Get("Content-Length")
)
size, err = strconv.ParseInt(sizeHeader, 10, 64)
if err != nil {
return "", ocispec.Descriptor{}, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
}
if size < 0 {
return "", ocispec.Descriptor{}, errors.Errorf("%q in header not a valid size", sizeHeader)
}
desc := ocispec.Descriptor{
Digest: dgst,
MediaType: resp.Header.Get("Content-Type"), // need to strip disposition?
Size: size,
}
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
return ref, desc, nil
}
return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref)
}
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return nil, err
}
base, err := r.base(refspec)
if err != nil {
return nil, err
}
return dockerFetcher{
dockerBase: base,
}, nil
}
func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return nil, err
}
// Manifests can be pushed by digest like any other object, but the passed in
// reference cannot take a digest without the associated content. A tag is allowed
// and will be used to tag pushed manifests.
if refspec.Object != "" && strings.Contains(refspec.Object, "@") {
return nil, errors.New("cannot use digest reference for push locator")
}
base, err := r.base(refspec)
if err != nil {
return nil, err
}
return dockerPusher{
dockerBase: base,
tag: refspec.Object,
}, nil
}
type dockerBase struct {
base url.URL
token string
client *http.Client
useBasic bool
username string
secret string
}
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
var (
err error
base url.URL base url.URL
username, secret string username, secret string
) )
@ -93,184 +248,55 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
if r.credentials != nil { if r.credentials != nil {
username, secret, err = r.credentials(base.Host) username, secret, err = r.credentials(base.Host)
if err != nil { if err != nil {
return "", ocispec.Descriptor{}, nil, err return nil, err
} }
} }
prefix := strings.TrimPrefix(refspec.Locator, host+"/") prefix := strings.TrimPrefix(refspec.Locator, host+"/")
base.Path = path.Join("/v2", prefix) base.Path = path.Join("/v2", prefix)
fetcher := &dockerFetcher{ return &dockerBase{
base: base, base: base,
client: r.client, client: r.client,
username: username, username: username,
secret: secret, secret: secret,
} }, nil
var (
urls []string
dgst = refspec.Digest()
)
if dgst != "" {
if err := dgst.Validate(); err != nil {
// need to fail here, since we can't actually resolve the invalid
// digest.
return "", ocispec.Descriptor{}, nil, err
}
// turns out, we have a valid digest, make a url.
urls = append(urls, fetcher.url("manifests", dgst.String()))
} else {
urls = append(urls, fetcher.url("manifests", refspec.Object))
}
// fallback to blobs on not found.
urls = append(urls, fetcher.url("blobs", dgst.String()))
for _, u := range urls {
req, err := http.NewRequest(http.MethodHead, u, nil)
if err != nil {
return "", ocispec.Descriptor{}, nil, err
}
// set headers for all the types we support for resolution.
req.Header.Set("Accept", strings.Join([]string{
images.MediaTypeDockerSchema2Manifest,
images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest,
ocispec.MediaTypeImageIndex, "*"}, ", "))
log.G(ctx).Debug("resolving")
resp, err := fetcher.doRequest(ctx, req)
if err != nil {
return "", ocispec.Descriptor{}, nil, err
}
resp.Body.Close() // don't care about body contents.
if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound {
continue
}
return "", ocispec.Descriptor{}, nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
// this is the only point at which we trust the registry. we use the
// content headers to assemble a descriptor for the name. when this becomes
// more robust, we mostly get this information from a secure trust store.
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader != "" {
if err := dgstHeader.Validate(); err != nil {
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
}
dgst = dgstHeader
}
if dgst == "" {
return "", ocispec.Descriptor{}, nil, errors.Errorf("could not resolve digest for %v", ref)
}
var (
size int64
sizeHeader = resp.Header.Get("Content-Length")
)
size, err = strconv.ParseInt(sizeHeader, 10, 64)
if err != nil {
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
}
if size < 0 {
return "", ocispec.Descriptor{}, nil, errors.Errorf("%q in header not a valid size", sizeHeader)
}
desc := ocispec.Descriptor{
Digest: dgst,
MediaType: resp.Header.Get("Content-Type"), // need to strip disposition?
Size: size,
}
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
return ref, desc, fetcher, nil
}
return "", ocispec.Descriptor{}, nil, errors.Errorf("%v not found", ref)
} }
type dockerFetcher struct { func (r *dockerBase) url(ps ...string) string {
base url.URL
token string
client *http.Client
useBasic bool
username string
secret string
}
func (r *dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"base": r.base.String(),
"digest": desc.Digest,
},
))
paths, err := getV2URLPaths(desc)
if err != nil {
return nil, err
}
for _, path := range paths {
u := r.url(path)
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
resp, err := r.doRequest(ctx, req)
if err != nil {
return nil, err
}
if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound {
continue // try one of the other urls.
}
resp.Body.Close()
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
return resp.Body, nil
}
return nil, errors.New("not found")
}
func (r *dockerFetcher) url(ps ...string) string {
url := r.base url := r.base
url.Path = path.Join(url.Path, path.Join(ps...)) url.Path = path.Join(url.Path, path.Join(ps...))
return url.String() return url.String()
} }
func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { func (r *dockerBase) authorize(req *http.Request) {
return r.doRequestWithRetries(ctx, req, nil) if r.useBasic {
req.SetBasicAuth(r.username, r.secret)
} else if r.token != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
}
} }
func (r *dockerFetcher) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) { func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
log.G(ctx).WithField("request.headers", req.Header).Debug("fetch content") log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("Do request")
r.authorize(req) r.authorize(req)
resp, err := ctxhttp.Do(ctx, r.client, req) resp, err := ctxhttp.Do(ctx, r.client, req)
if err != nil { if err != nil {
return nil, err return nil, errors.Wrap(err, "failed to do request")
} }
log.G(ctx).WithFields(logrus.Fields{ log.G(ctx).WithFields(logrus.Fields{
"status": resp.Status, "status": resp.Status,
"response.headers": resp.Header, "response.headers": resp.Header,
}).Debug("fetch response received") }).Debug("fetch response received")
return resp, nil
}
func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
resp, err := r.doRequest(ctx, req)
if err != nil {
return nil, err
}
responses = append(responses, resp) responses = append(responses, resp)
req, err = r.retryRequest(ctx, req, responses) req, err = r.retryRequest(ctx, req, responses)
@ -283,15 +309,7 @@ func (r *dockerFetcher) doRequestWithRetries(ctx context.Context, req *http.Requ
return resp, err return resp, err
} }
func (r *dockerFetcher) authorize(req *http.Request) { func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
if r.useBasic {
req.SetBasicAuth(r.username, r.secret)
} else if r.token != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
}
}
func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
if len(responses) > 5 { if len(responses) > 5 {
return nil, nil return nil, nil
} }
@ -300,19 +318,19 @@ func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, res
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
for _, c := range parseAuthHeader(last.Header) { for _, c := range parseAuthHeader(last.Header) {
if c.scheme == bearerAuth { if c.scheme == bearerAuth {
if errStr := c.parameters["error"]; errStr != "" { if err := invalidAuthorization(c, responses); err != nil {
// TODO: handle expired case r.token = ""
return nil, errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) return nil, err
} }
if err := r.setTokenAuth(ctx, c.parameters); err != nil { if err := r.setTokenAuth(ctx, c.parameters); err != nil {
return nil, err return nil, err
} }
return req, nil return copyRequest(req)
} else if c.scheme == basicAuth { } else if c.scheme == basicAuth {
if r.username != "" && r.secret != "" { if r.username != "" && r.secret != "" {
r.useBasic = true r.useBasic = true
} }
return req, nil return copyRequest(req)
} }
} }
return nil, nil return nil, nil
@ -322,7 +340,7 @@ func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, res
if strings.Contains(req.URL.Path, "/manifests/") { if strings.Contains(req.URL.Path, "/manifests/") {
// TODO: copy request? // TODO: copy request?
req.Method = http.MethodGet req.Method = http.MethodGet
return req, nil return copyRequest(req)
} }
} }
@ -330,6 +348,42 @@ func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, res
return nil, nil return nil, nil
} }
func invalidAuthorization(c challenge, responses []*http.Response) error {
errStr := c.parameters["error"]
if errStr == "" {
return nil
}
n := len(responses)
if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
return nil
}
return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
}
func sameRequest(r1, r2 *http.Request) bool {
if r1.Method != r2.Method {
return false
}
if *r1.URL != *r2.URL {
return false
}
return true
}
func copyRequest(req *http.Request) (*http.Request, error) {
ireq := *req
if ireq.GetBody != nil {
var err error
ireq.Body, err = ireq.GetBody()
if err != nil {
return nil, err
}
}
return &ireq, nil
}
func isManifestAccept(h http.Header) bool { func isManifestAccept(h http.Header) bool {
for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] { for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
switch ah { switch ah {
@ -346,7 +400,7 @@ func isManifestAccept(h http.Header) bool {
return false return false
} }
func (r *dockerFetcher) setTokenAuth(ctx context.Context, params map[string]string) error { func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error {
realm, ok := params["realm"] realm, ok := params["realm"]
if !ok { if !ok {
return errors.New("no realm specified for token auth challenge") return errors.New("no realm specified for token auth challenge")
@ -387,24 +441,6 @@ func (r *dockerFetcher) setTokenAuth(ctx context.Context, params map[string]stri
return nil return nil
} }
// getV2URLPaths generates the candidate urls paths for the object based on the
// set of hints and the provided object id. URLs are returned in the order of
// most to least likely succeed.
func getV2URLPaths(desc ocispec.Descriptor) ([]string, error) {
var urls []string
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
urls = append(urls, path.Join("manifests", desc.Digest.String()))
}
// always fallback to attempting to get the object out of the blobs store.
urls = append(urls, path.Join("blobs", desc.Digest.String()))
return urls, nil
}
type tokenOptions struct { type tokenOptions struct {
realm string realm string
service string service string
@ -419,7 +455,7 @@ type postTokenResponse struct {
Scope string `json:"scope"` Scope string `json:"scope"`
} }
func (r *dockerFetcher) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
form := url.Values{} form := url.Values{}
form.Set("scope", strings.Join(to.scopes, " ")) form.Set("scope", strings.Join(to.scopes, " "))
form.Set("service", to.service) form.Set("service", to.service)
@ -473,7 +509,7 @@ type getTokenResponse struct {
} }
// getToken fetches a token using a GET request // getToken fetches a token using a GET request
func (r *dockerFetcher) getToken(ctx context.Context, to tokenOptions) (string, error) { func (r *dockerBase) getToken(ctx context.Context, to tokenOptions) (string, error) {
req, err := http.NewRequest("GET", to.realm, nil) req, err := http.NewRequest("GET", to.realm, nil)
if err != nil { if err != nil {
return "", err return "", err

View File

@ -56,11 +56,41 @@ func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc
func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
log.G(ctx).Debug("fetch") log.G(ctx).Debug("fetch")
ref := MakeRefKey(ctx, desc) ref := MakeRefKey(ctx, desc)
cw, err := ingester.Writer(ctx, ref, desc.Size, desc.Digest)
if err != nil {
if !content.IsExists(err) {
return err
}
return nil
}
defer cw.Close()
rc, err := fetcher.Fetch(ctx, desc) rc, err := fetcher.Fetch(ctx, desc)
if err != nil { if err != nil {
return err return err
} }
defer rc.Close() defer rc.Close()
return content.WriteBlob(ctx, ingester, ref, rc, desc.Size, desc.Digest) return content.Copy(cw, rc, desc.Size, desc.Digest)
}
func PushHandler(provider content.Provider, pusher Pusher) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
"digest": desc.Digest,
"mediatype": desc.MediaType,
"size": desc.Size,
}))
log.G(ctx).Debug("push")
r, err := provider.Reader(ctx, desc.Digest)
if err != nil {
return nil, err
}
defer r.Close()
return nil, pusher.Push(ctx, desc, r)
}
} }

View File

@ -7,7 +7,7 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
) )
// Resolver provides a remote based on a locator. // Resolver provides remotes based on a locator.
type Resolver interface { type Resolver interface {
// Resolve attempts to resolve the reference into a name and descriptor. // Resolve attempts to resolve the reference into a name and descriptor.
// //
@ -20,7 +20,15 @@ type Resolver interface {
// While the name may differ from ref, it should itself be a valid ref. // While the name may differ from ref, it should itself be a valid ref.
// //
// If the resolution fails, an error will be returned. // If the resolution fails, an error will be returned.
Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, fetcher Fetcher, err error) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)
// Fetcher returns a new fetcher for the provided reference.
// All content fetched from the returned fetcher will be
// from the namespace referred to by ref.
Fetcher(ctx context.Context, ref string) (Fetcher, error)
// Pusher returns a new pusher for the provided reference
Pusher(ctx context.Context, ref string) (Pusher, error)
} }
type Fetcher interface { type Fetcher interface {
@ -28,6 +36,12 @@ type Fetcher interface {
Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
} }
type Pusher interface {
// Push pushes the resource identified by the descriptor using the
// passed in reader.
Push(ctx context.Context, d ocispec.Descriptor, r io.Reader) error
}
// FetcherFunc allows package users to implement a Fetcher with just a // FetcherFunc allows package users to implement a Fetcher with just a
// function. // function.
type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
@ -35,3 +49,11 @@ type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadClos
func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
return fn(ctx, desc) return fn(ctx, desc)
} }
// PusherFunc allows package users to implement a Pusher with just a
// function.
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error
func (fn PusherFunc) Pusher(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
return fn(ctx, desc, r)
}

View File

@ -2,14 +2,10 @@ package rootfs
import ( import (
"context" "context"
"io" "fmt"
"io/ioutil"
"os"
"github.com/containerd/containerd"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshot" "github.com/containerd/containerd/snapshot"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/identity"
@ -17,134 +13,72 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
type Unpacker interface { type Applier interface {
Unpack(ctx context.Context, layers []ocispec.Descriptor) (digest.Digest, error) Apply(context.Context, ocispec.Descriptor, []mount.Mount) (ocispec.Descriptor, error)
} }
type Mounter interface { type Layer struct {
Mount(target string, mounts ...containerd.Mount) error Diff ocispec.Descriptor
Unmount(target string) error Blob ocispec.Descriptor
} }
// ApplyLayer applies the layer to the provided parent. The resulting snapshot func ApplyLayers(ctx context.Context, layers []Layer, sn snapshot.Snapshotter, a Applier) (digest.Digest, error) {
// will be stored under its ChainID. var chain []digest.Digest
// for _, layer := range layers {
// The parent *must* be the chainID of the parent layer. if err := applyLayer(ctx, layer, chain, sn, a); err != nil {
// // TODO: possibly wait and retry if extraction of same chain id was in progress
// The returned digest is the diffID for the applied layer. return "", err
func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, parent digest.Digest) (digest.Digest, error) {
ctx := context.TODO()
// create a temporary directory to work from, needs to be on same
// filesystem. Probably better if this shared but we'll use a tempdir, for
// now.
dir, err := ioutil.TempDir("", "unpack-")
if err != nil {
return "", errors.Wrapf(err, "creating temporary directory failed")
}
defer os.RemoveAll(dir)
// TODO(stevvooe): Choose this key WAY more carefully. We should be able to
// create collisions for concurrent, conflicting unpack processes but we
// would need to have it be a function of the parent diffID and child
// layerID (since we don't know the diffID until we are done!).
key := dir
mounts, err := snapshots.Prepare(ctx, key, parent.String())
if err != nil {
return "", err
}
if err := mounter.Mount(dir, mounts...); err != nil {
if err := snapshots.Remove(ctx, key); err != nil {
log.L.WithError(err).Error("snapshot rollback failed")
} }
return "", err
chain = append(chain, layer.Diff.Digest)
} }
defer mounter.Unmount(dir) return identity.ChainID(chain), nil
rd, err = compression.DecompressStream(rd)
if err != nil {
return "", err
}
digester := digest.Canonical.Digester() // used to calculate diffID.
rd = io.TeeReader(rd, digester.Hash())
if _, err := archive.Apply(context.Background(), dir, rd); err != nil {
return "", err
}
diffID := digester.Digest()
chainID := diffID
if parent != "" {
chainID = identity.ChainID([]digest.Digest{parent, chainID})
}
if _, err := snapshots.Stat(ctx, chainID.String()); err == nil {
return diffID, snapshots.Remove(ctx, key)
}
return diffID, snapshots.Commit(ctx, chainID.String(), key)
} }
// Prepare the root filesystem from the set of layers. Snapshots are created func applyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshot.Snapshotter, a Applier) error {
// for each layer if they don't exist, keyed by their chain id. If the snapshot
// already exists, it will be skipped.
//
// If successful, the chainID for the top-level layer is returned. That
// identifier can be used to check out a snapshot.
func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounter, layers []ocispec.Descriptor,
// TODO(stevvooe): The following functions are candidate for internal
// object functions. We can use these to formulate the beginnings of a
// rootfs Controller.
//
// Just pass them in for now.
openBlob func(context.Context, digest.Digest) (io.ReadCloser, error),
resolveDiffID func(digest.Digest) digest.Digest,
registerDiffID func(diffID, dgst digest.Digest) error) (digest.Digest, error) {
var ( var (
parent digest.Digest parent = identity.ChainID(chain)
chain []digest.Digest chainID = identity.ChainID(append(chain, layer.Diff.Digest))
diff ocispec.Descriptor
) )
for _, layer := range layers { _, err := sn.Stat(ctx, chainID.String())
// This will convert a possibly compressed layer hash to the if err == nil {
// uncompressed hash, if we know about it. If we don't, we unpack and log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
// calculate it. If we do have it, we then calculate the chain id for return nil
// the application and see if the snapshot is there. } else if !snapshot.IsNotExist(err) {
diffID := resolveDiffID(layer.Digest) return errors.Wrap(err, "failed to stat snapshot")
if diffID != "" {
chainLocal := append(chain, diffID)
chainID := identity.ChainID(chainLocal)
if _, err := snapshots.Stat(ctx, chainID.String()); err == nil {
continue
}
}
rc, err := openBlob(ctx, layer.Digest)
if err != nil {
return "", err
}
defer rc.Close() // pretty lazy!
diffID, err = ApplyLayer(snapshots, mounter, rc, parent)
if err != nil {
return "", err
}
// Register the association between the diffID and the layer's digest.
// For uncompressed layers, this will be the same. For compressed
// layers, we can look up the diffID from the digest if we've already
// unpacked it.
if err := registerDiffID(diffID, layer.Digest); err != nil {
return "", err
}
chain = append(chain, diffID)
parent = identity.ChainID(chain)
} }
return parent, nil key := fmt.Sprintf("extract %s", chainID)
// Prepare snapshot with from parent
mounts, err := sn.Prepare(ctx, key, parent.String())
if err != nil {
//TODO: If is snapshot exists error, retry
return errors.Wrap(err, "failed to prepare extraction layer")
}
defer func() {
if err != nil {
log.G(ctx).WithError(err).WithField("key", key).Infof("Apply failure, attempting cleanup")
if rerr := sn.Remove(ctx, key); rerr != nil {
log.G(ctx).WithError(rerr).Warnf("Extraction snapshot %q removal failed: %v", key)
}
}
}()
diff, err = a.Apply(ctx, layer.Blob, mounts)
if err != nil {
return errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
}
if diff.Digest != layer.Diff.Digest {
err = errors.Errorf("wrong diff id calculated on extraction %q", diff.Digest)
return err
}
if err = sn.Commit(ctx, chainID.String(), key); err != nil {
return errors.Wrapf(err, "failed to commit snapshot %s", parent)
}
return nil
} }

52
vendor/github.com/containerd/containerd/rootfs/diff.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package rootfs
import (
"context"
"fmt"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshot"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// MountDiffer computes the filesystem difference between two sets of
// mounts and stores the result as a content blob, returning a descriptor
// for the stored diff.
type MountDiffer interface {
	DiffMounts(ctx context.Context, lower, upper []mount.Mount, media, ref string) (ocispec.Descriptor, error)
}

// DiffOptions bundles the dependencies required to compute and store
// snapshot diffs.
type DiffOptions struct {
	MountDiffer
	content.Store
	snapshot.Snapshotter
}
// Diff computes the difference between the snapshot identified by
// snapshotID and its parent, storing the result (as an uncompressed
// layer) under contentRef via the provided MountDiffer, and returns the
// descriptor of the stored diff.
//
// Both sides are mounted through temporary view snapshots which are
// removed (best effort) before returning.
func Diff(ctx context.Context, snapshotID, contentRef string, sn snapshot.Snapshotter, md MountDiffer) (ocispec.Descriptor, error) {
	info, err := sn.Stat(ctx, snapshotID)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	// Read-only view of the parent forms the lower side of the diff.
	lowerKey := fmt.Sprintf("%s-parent-view", info.Parent)
	lower, err := sn.View(ctx, lowerKey, info.Parent)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer sn.Remove(ctx, lowerKey)

	var upper []mount.Mount
	if info.Kind == snapshot.KindActive {
		// Active snapshots can be mounted directly.
		upper, err = sn.Mounts(ctx, snapshotID)
		if err != nil {
			return ocispec.Descriptor{}, err
		}
	} else {
		// Committed snapshots require a temporary view for mounting.
		upperKey := fmt.Sprintf("%s-view", snapshotID)
		upper, err = sn.View(ctx, upperKey, snapshotID)
		if err != nil {
			return ocispec.Descriptor{}, err
		}
		// BUG FIX: previously this deferred removal of lowerKey again,
		// leaking the upper view snapshot; remove upperKey instead.
		defer sn.Remove(ctx, upperKey)
	}

	return md.DiffMounts(ctx, lower, upper, ocispec.MediaTypeImageLayer, contentRef)
}

View File

@ -6,8 +6,8 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/containerd/containerd"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshot" "github.com/containerd/containerd/snapshot"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -19,7 +19,12 @@ var (
type initializerFunc func(string) error type initializerFunc func(string) error
func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]containerd.Mount, error) { type Mounter interface {
Mount(target string, mounts ...mount.Mount) error
Unmount(target string) error
}
func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
_, err := snapshotter.Stat(ctx, name) _, err := snapshotter.Stat(ctx, name)
if err == nil { if err == nil {
return nil, errors.Errorf("rootfs already exists") return nil, errors.Errorf("rootfs already exists")

View File

@ -19,6 +19,7 @@ func (rr *remoteReader) Read(p []byte) (n int, err error) {
} }
return return
} }
rr.extra = rr.extra[:0]
p = p[n:] p = p[n:]
for len(p) > 0 { for len(p) > 0 {

View File

@ -0,0 +1,75 @@
package diff
import (
"context"
diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/types/descriptor"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type DiffService interface {
rootfs.Applier
rootfs.MountDiffer
}
// NewDiffServiceFromClient returns a new DiffService which communicates
// over a GRPC connection.
func NewDiffServiceFromClient(client diffapi.DiffClient) DiffService {
	return &remote{
		client: client,
	}
}

// remote implements DiffService by forwarding each call to a GRPC diff
// service client.
type remote struct {
	client diffapi.DiffClient
}
// Apply sends the layer described by diff, together with the target
// mounts, to the diff service and returns the descriptor of the applied
// (uncompressed) layer.
func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
	resp, err := r.client.Apply(ctx, &diffapi.ApplyRequest{
		Diff:   fromDescriptor(diff),
		Mounts: fromMounts(mounts),
	})
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return toDescriptor(resp.Applied), nil
}
// DiffMounts asks the diff service to compute the difference between the
// filesystems described by a (lower) and b (upper), storing the result
// under ref with the given media type, and returns its descriptor.
func (r *remote) DiffMounts(ctx context.Context, a, b []mount.Mount, media, ref string) (ocispec.Descriptor, error) {
	resp, err := r.client.Diff(ctx, &diffapi.DiffRequest{
		Left:      fromMounts(a),
		Right:     fromMounts(b),
		MediaType: media,
		Ref:       ref,
	})
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return toDescriptor(resp.Diff), nil
}
func fromDescriptor(d ocispec.Descriptor) *descriptor.Descriptor {
return &descriptor.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size_: d.Size,
}
}
// fromMounts converts mount structs into their API wire representation.
func fromMounts(mounts []mount.Mount) []*mounttypes.Mount {
	out := make([]*mounttypes.Mount, 0, len(mounts))
	for _, m := range mounts {
		out = append(out, &mounttypes.Mount{
			Type:    m.Type,
			Source:  m.Source,
			Options: m.Options,
		})
	}
	return out
}

View File

@ -0,0 +1,203 @@
package diff
import (
"io"
"io/ioutil"
"os"
diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/types/descriptor"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshot"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func init() {
plugin.Register("diff-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return newService(ic.Content, ic.Snapshotter)
},
})
}
type service struct {
store content.Store
snapshotter snapshot.Snapshotter
}
func newService(store content.Store, snapshotter snapshot.Snapshotter) (*service, error) {
return &service{
store: store,
snapshotter: snapshotter,
}, nil
}
func (s *service) Register(gs *grpc.Server) error {
diffapi.RegisterDiffServer(gs, s)
return nil
}
// Apply extracts the layer referenced by er.Diff from the content store
// onto the filesystem assembled from er.Mounts, and returns a descriptor
// for the uncompressed (applied) layer, digesting and counting the
// stream as it is unpacked.
func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
	desc := toDescriptor(er.Diff)
	// TODO: Check for supported media types

	mounts := toMounts(er.Mounts)

	// Mount the target filesystem at a throwaway directory for the
	// duration of the extraction; both teardown steps are deferred.
	dir, err := ioutil.TempDir("", "extract-")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temporary directory")
	}
	defer os.RemoveAll(dir)

	if err := mount.MountAll(mounts, dir); err != nil {
		return nil, errors.Wrap(err, "failed to mount")
	}
	defer mount.Unmount(dir, 0)

	r, err := s.store.Reader(ctx, desc.Digest)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get reader from content store")
	}
	defer r.Close()

	// TODO: only decompress stream if media type is compressed
	ds, err := compression.DecompressStream(r)
	if err != nil {
		return nil, err
	}
	defer ds.Close()

	// Tee the decompressed stream through a digester and a byte counter
	// so the response can describe exactly what was applied.
	digester := digest.Canonical.Digester()
	rc := &readCounter{
		r: io.TeeReader(ds, digester.Hash()),
	}

	if _, err := archive.Apply(ctx, dir, rc); err != nil {
		return nil, err
	}

	// Read any trailing data so the digest covers the full stream.
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		return nil, err
	}

	resp := &diffapi.ApplyResponse{
		Applied: &descriptor.Descriptor{
			MediaType: ocispec.MediaTypeImageLayer,
			Digest:    digester.Digest(),
			Size_:     rc.c,
		},
	}

	return resp, nil
}
// Diff mounts both sides (Left as lower, Right as upper), writes the
// computed filesystem difference into the content store under dr.Ref,
// and returns a descriptor for the committed blob using the requested
// media type.
func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
	aMounts := toMounts(dr.Left)
	bMounts := toMounts(dr.Right)

	// Each side gets its own temporary mount point; cleanup is deferred.
	aDir, err := ioutil.TempDir("", "left-")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temporary directory")
	}
	defer os.RemoveAll(aDir)

	bDir, err := ioutil.TempDir("", "right-")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temporary directory")
	}
	defer os.RemoveAll(bDir)

	if err := mount.MountAll(aMounts, aDir); err != nil {
		return nil, errors.Wrap(err, "failed to mount")
	}
	defer mount.Unmount(aDir, 0)

	if err := mount.MountAll(bMounts, bDir); err != nil {
		return nil, errors.Wrap(err, "failed to mount")
	}
	defer mount.Unmount(bDir, 0)

	cw, err := s.store.Writer(ctx, dr.Ref, 0, "")
	if err != nil {
		return nil, errors.Wrap(err, "failed to open writer")
	}

	// TODO: Validate media type
	// TODO: Support compressed media types (link compressed to uncompressed)
	//dgstr := digest.SHA256.Digester()
	//wc := &writeCounter{}
	//compressed, err := compression.CompressStream(cw, compression.Gzip)
	//if err != nil {
	//	return nil, errors.Wrap(err, "failed to get compressed stream")
	//}
	//err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash(), wc), lowerDir, upperDir)
	//compressed.Close()

	err = archive.WriteDiff(ctx, cw, aDir, bDir)
	if err != nil {
		return nil, errors.Wrap(err, "failed to write diff")
	}

	dgst := cw.Digest()
	// NOTE(review): Commit is given size 0 — presumably this skips size
	// validation in the content store; confirm against the Writer contract.
	if err := cw.Commit(0, dgst); err != nil {
		return nil, errors.Wrap(err, "failed to commit")
	}

	// Re-read the committed blob's info to report its actual size.
	info, err := s.store.Info(ctx, dgst)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get info from content store")
	}

	desc := ocispec.Descriptor{
		MediaType: dr.MediaType,
		Digest:    info.Digest,
		Size:      info.Size,
	}

	return &diffapi.DiffResponse{
		Diff: fromDescriptor(desc),
	}, nil
}
type readCounter struct {
r io.Reader
c int64
}
func (rc *readCounter) Read(p []byte) (n int, err error) {
n, err = rc.r.Read(p)
rc.c += int64(n)
return
}
func toDescriptor(d *descriptor.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size_,
}
}
func toMounts(apim []*mounttypes.Mount) []mount.Mount {
mounts := make([]mount.Mount, len(apim))
for i, m := range apim {
mounts[i] = mount.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return mounts
}

View File

@ -4,6 +4,7 @@ import (
imagesapi "github.com/containerd/containerd/api/services/images" imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/api/types/descriptor" "github.com/containerd/containerd/api/types/descriptor"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/metadata"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -67,9 +68,9 @@ func rewriteGRPCError(err error) error {
switch grpc.Code(errors.Cause(err)) { switch grpc.Code(errors.Cause(err)) {
case codes.AlreadyExists: case codes.AlreadyExists:
return images.ErrExists return metadata.ErrExists
case codes.NotFound: case codes.NotFound:
return images.ErrNotFound return metadata.ErrNotFound
} }
return err return err
@ -77,9 +78,9 @@ func rewriteGRPCError(err error) error {
func mapGRPCError(err error, id string) error { func mapGRPCError(err error, id string) error {
switch { switch {
case images.IsNotFound(err): case metadata.IsNotFound(err):
return grpc.Errorf(codes.NotFound, "image %v not found", id) return grpc.Errorf(codes.NotFound, "image %v not found", id)
case images.IsExists(err): case metadata.IsExists(err):
return grpc.Errorf(codes.AlreadyExists, "image %v already exists", id) return grpc.Errorf(codes.AlreadyExists, "image %v already exists", id)
} }

View File

@ -4,6 +4,7 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
imagesapi "github.com/containerd/containerd/api/services/images" imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/plugin" "github.com/containerd/containerd/plugin"
"github.com/golang/protobuf/ptypes/empty" "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -73,7 +74,7 @@ func (s *Service) Delete(ctx context.Context, req *imagesapi.DeleteRequest) (*em
} }
func (s *Service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error { func (s *Service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error {
return func(tx *bolt.Tx) error { return fn(ctx, images.NewImageStore(tx)) } return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewImageStore(tx)) }
} }
func (s *Service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error { func (s *Service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {

View File

@ -1,39 +0,0 @@
package rootfs
import (
"context"
rootfsapi "github.com/containerd/containerd/api/services/rootfs"
containerd_v1_types "github.com/containerd/containerd/api/types/descriptor"
"github.com/containerd/containerd/rootfs"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func NewUnpackerFromClient(client rootfsapi.RootFSClient) rootfs.Unpacker {
return remoteUnpacker{
client: client,
}
}
type remoteUnpacker struct {
client rootfsapi.RootFSClient
}
func (rp remoteUnpacker) Unpack(ctx context.Context, layers []ocispec.Descriptor) (digest.Digest, error) {
pr := rootfsapi.UnpackRequest{
Layers: make([]*containerd_v1_types.Descriptor, len(layers)),
}
for i, l := range layers {
pr.Layers[i] = &containerd_v1_types.Descriptor{
MediaType: l.MediaType,
Digest: l.Digest,
Size_: l.Size,
}
}
resp, err := rp.client.Unpack(ctx, &pr)
if err != nil {
return "", err
}
return resp.ChainID, nil
}

View File

@ -1,114 +0,0 @@
package rootfs
import (
"github.com/containerd/containerd"
rootfsapi "github.com/containerd/containerd/api/services/rootfs"
containerd_v1_types "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/rootfs"
"github.com/containerd/containerd/snapshot"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func init() {
plugin.Register("rootfs-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return NewService(ic.Content, ic.Snapshotter)
},
})
}
type Service struct {
store content.Store
snapshotter snapshot.Snapshotter
}
func NewService(store content.Store, snapshotter snapshot.Snapshotter) (*Service, error) {
return &Service{
store: store,
snapshotter: snapshotter,
}, nil
}
func (s *Service) Register(gs *grpc.Server) error {
rootfsapi.RegisterRootFSServer(gs, s)
return nil
}
func (s *Service) Unpack(ctx context.Context, pr *rootfsapi.UnpackRequest) (*rootfsapi.UnpackResponse, error) {
layers := make([]ocispec.Descriptor, len(pr.Layers))
for i, l := range pr.Layers {
layers[i] = ocispec.Descriptor{
MediaType: l.MediaType,
Digest: l.Digest,
Size: l.Size_,
}
}
log.G(ctx).Infof("Preparing %#v", layers)
chainID, err := rootfs.Prepare(ctx, s.snapshotter, mounter{}, layers, s.store.Reader, emptyResolver, noopRegister)
if err != nil {
log.G(ctx).Errorf("Rootfs Prepare failed!: %v", err)
return nil, err
}
log.G(ctx).Infof("ChainID %#v", chainID)
return &rootfsapi.UnpackResponse{
ChainID: chainID,
}, nil
}
func (s *Service) Prepare(ctx context.Context, ir *rootfsapi.PrepareRequest) (*rootfsapi.MountResponse, error) {
mounts, err := rootfs.InitRootFS(ctx, ir.Name, ir.ChainID, ir.Readonly, s.snapshotter, mounter{})
if err != nil {
return nil, grpc.Errorf(codes.AlreadyExists, "%v", err)
}
return &rootfsapi.MountResponse{
Mounts: apiMounts(mounts),
}, nil
}
func (s *Service) Mounts(ctx context.Context, mr *rootfsapi.MountsRequest) (*rootfsapi.MountResponse, error) {
mounts, err := s.snapshotter.Mounts(ctx, mr.Name)
if err != nil {
return nil, err
}
return &rootfsapi.MountResponse{
Mounts: apiMounts(mounts),
}, nil
}
func apiMounts(mounts []containerd.Mount) []*containerd_v1_types.Mount {
am := make([]*containerd_v1_types.Mount, len(mounts))
for i, m := range mounts {
am[i] = &containerd_v1_types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return am
}
type mounter struct{}
func (mounter) Mount(dir string, mounts ...containerd.Mount) error {
return containerd.MountAll(mounts, dir)
}
func (mounter) Unmount(dir string) error {
return containerd.Unmount(dir, 0)
}
func emptyResolver(digest.Digest) digest.Digest {
return digest.Digest("")
}
func noopRegister(digest.Digest, digest.Digest) error {
return nil
}

View File

@ -0,0 +1,158 @@
package snapshot
import (
"context"
"io"
"strings"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshot"
"github.com/pkg/errors"
)
// NewSnapshotterFromClient returns a new Snapshotter which communicates
// over a GRPC connection.
func NewSnapshotterFromClient(client snapshotapi.SnapshotClient) snapshot.Snapshotter {
	return &remoteSnapshotter{
		client: client,
	}
}

// remoteSnapshotter adapts a GRPC snapshot service client to the
// snapshot.Snapshotter interface, translating GRPC status codes back
// into the package's sentinel errors.
type remoteSnapshotter struct {
	client snapshotapi.SnapshotClient
}
func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
resp, err := r.client.Stat(ctx, &snapshotapi.StatRequest{Key: key})
if err != nil {
return snapshot.Info{}, rewriteGRPCError(err)
}
return toInfo(resp.Info), nil
}
func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
resp, err := r.client.Usage(ctx, &snapshotapi.UsageRequest{Key: key})
if err != nil {
return snapshot.Usage{}, rewriteGRPCError(err)
}
return toUsage(resp), nil
}
func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
resp, err := r.client.Mounts(ctx, &snapshotapi.MountsRequest{Key: key})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error) {
resp, err := r.client.Prepare(ctx, &snapshotapi.PrepareRequest{Key: key, Parent: parent})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) {
resp, err := r.client.View(ctx, &snapshotapi.PrepareRequest{Key: key, Parent: parent})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string) error {
_, err := r.client.Commit(ctx, &snapshotapi.CommitRequest{
Name: name,
Key: key,
})
return rewriteGRPCError(err)
}
func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error {
_, err := r.client.Remove(ctx, &snapshotapi.RemoveRequest{Key: key})
return rewriteGRPCError(err)
}
// Walk lists all snapshots via the streaming List RPC and invokes fn for
// every returned Info. Iteration stops at the first error returned by fn
// or by the stream.
func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
	sc, err := r.client.List(ctx, &snapshotapi.ListRequest{})
	if err != nil {
		// BUG FIX: the translated error was previously computed but not
		// returned, silently swallowing List failures.
		return rewriteGRPCError(err)
	}
	for {
		resp, err := sc.Recv()
		if err != nil {
			if err == io.EOF {
				// Normal end of stream.
				return nil
			}
			return rewriteGRPCError(err)
		}
		if resp == nil {
			return nil
		}
		for _, info := range resp.Info {
			if err := fn(ctx, toInfo(info)); err != nil {
				return err
			}
		}
	}
}
// rewriteGRPCError converts GRPC status codes returned by the snapshot
// service back into the sentinel errors of the snapshot package, so
// callers can use the package's error predicates. Unrecognized errors
// (and nil) pass through unchanged.
func rewriteGRPCError(err error) error {
	switch grpc.Code(errors.Cause(err)) {
	case codes.AlreadyExists:
		return snapshot.ErrSnapshotExist
	case codes.NotFound:
		return snapshot.ErrSnapshotNotExist
	case codes.FailedPrecondition:
		// FailedPrecondition covers several conditions; recover the
		// specific sentinel by matching on the status description.
		desc := grpc.ErrorDesc(errors.Cause(err))
		if strings.Contains(desc, snapshot.ErrSnapshotNotActive.Error()) {
			return snapshot.ErrSnapshotNotActive
		}
		if strings.Contains(desc, snapshot.ErrSnapshotNotCommitted.Error()) {
			return snapshot.ErrSnapshotNotCommitted
		}
	}
	return err
}
// toKind converts an API snapshot kind into the internal representation.
// Any kind other than active maps to committed.
func toKind(kind snapshotapi.Kind) snapshot.Kind {
	if kind != snapshotapi.KindActive {
		return snapshot.KindCommitted
	}
	return snapshot.KindActive
}
func toInfo(info snapshotapi.Info) snapshot.Info {
return snapshot.Info{
Name: info.Name,
Parent: info.Parent,
Kind: toKind(info.Kind),
Readonly: info.Readonly,
}
}
func toUsage(resp *snapshotapi.UsageResponse) snapshot.Usage {
return snapshot.Usage{
Inodes: resp.Inodes,
Size: resp.Size_,
}
}
func toMounts(resp *snapshotapi.MountsResponse) []mount.Mount {
mounts := make([]mount.Mount, len(resp.Mounts))
for i, m := range resp.Mounts {
mounts[i] = mount.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return mounts
}

View File

@ -0,0 +1,204 @@
package snapshot
import (
gocontext "context"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshot"
protoempty "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func init() {
plugin.Register("snapshots-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return newService(ic.Snapshotter)
},
})
}
var empty = &protoempty.Empty{}
type service struct {
snapshotter snapshot.Snapshotter
}
func newService(snapshotter snapshot.Snapshotter) (*service, error) {
return &service{
snapshotter: snapshotter,
}, nil
}
func (s *service) Register(gs *grpc.Server) error {
snapshotapi.RegisterSnapshotServer(gs, s)
return nil
}
func (s *service) Prepare(ctx context.Context, pr *snapshotapi.PrepareRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.Prepare(ctx, pr.Key, pr.Parent)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) View(ctx context.Context, pr *snapshotapi.PrepareRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing view snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.View(ctx, pr.Key, pr.Parent)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) Mounts(ctx context.Context, mr *snapshotapi.MountsRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("key", mr.Key).Debugf("Getting snapshot mounts")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.Mounts(ctx, mr.Key)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) Commit(ctx context.Context, cr *snapshotapi.CommitRequest) (*protoempty.Empty, error) {
log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("Committing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
if err := s.snapshotter.Commit(ctx, cr.Name, cr.Key); err != nil {
return nil, grpcError(err)
}
return empty, nil
}
func (s *service) Remove(ctx context.Context, rr *snapshotapi.RemoveRequest) (*protoempty.Empty, error) {
log.G(ctx).WithField("key", rr.Key).Debugf("Removing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
if err := s.snapshotter.Remove(ctx, rr.Key); err != nil {
return nil, grpcError(err)
}
return empty, nil
}
func (s *service) Stat(ctx context.Context, sr *snapshotapi.StatRequest) (*snapshotapi.StatResponse, error) {
log.G(ctx).WithField("key", sr.Key).Debugf("Statting snapshot")
// TODO: Apply namespace
info, err := s.snapshotter.Stat(ctx, sr.Key)
if err != nil {
return nil, grpcError(err)
}
return &snapshotapi.StatResponse{Info: fromInfo(info)}, nil
}
// List streams all snapshot infos to the client, batching them into
// response messages of up to 100 entries each.
func (s *service) List(sr *snapshotapi.ListRequest, ss snapshotapi.Snapshot_ListServer) error {
	// TODO: Apply namespace
	var (
		buffer    []snapshotapi.Info
		sendBlock = func(block []snapshotapi.Info) error {
			return ss.Send(&snapshotapi.ListResponse{
				Info: block,
			})
		}
	)
	err := s.snapshotter.Walk(ss.Context(), func(ctx gocontext.Context, info snapshot.Info) error {
		buffer = append(buffer, fromInfo(info))

		if len(buffer) >= 100 {
			if err := sendBlock(buffer); err != nil {
				return err
			}

			// NOTE(review): the backing array is reused after Send
			// returns — this assumes Send has fully serialized the block
			// before returning; confirm against the stream contract.
			buffer = buffer[:0]
		}

		return nil
	})
	if err != nil {
		return err
	}
	if len(buffer) > 0 {
		// Send remaining infos
		if err := sendBlock(buffer); err != nil {
			return err
		}
	}

	return nil
}
func (s *service) Usage(ctx context.Context, ur *snapshotapi.UsageRequest) (*snapshotapi.UsageResponse, error) {
// TODO: Apply namespace
usage, err := s.snapshotter.Usage(ctx, ur.Key)
if err != nil {
return nil, grpcError(err)
}
return fromUsage(usage), nil
}
// grpcError maps well-known snapshotter errors onto GRPC status codes so
// remote clients can translate them back into sentinel errors; anything
// unrecognized passes through unchanged.
func grpcError(err error) error {
	switch {
	case snapshot.IsNotExist(err):
		return grpc.Errorf(codes.NotFound, err.Error())
	case snapshot.IsExist(err):
		return grpc.Errorf(codes.AlreadyExists, err.Error())
	case snapshot.IsNotActive(err) || snapshot.IsNotCommitted(err):
		return grpc.Errorf(codes.FailedPrecondition, err.Error())
	default:
		return err
	}
}
func fromKind(kind snapshot.Kind) snapshotapi.Kind {
if kind == snapshot.KindActive {
return snapshotapi.KindActive
}
return snapshotapi.KindCommitted
}
func fromInfo(info snapshot.Info) snapshotapi.Info {
return snapshotapi.Info{
Name: info.Name,
Parent: info.Parent,
Kind: fromKind(info.Kind),
Readonly: info.Readonly,
}
}
func fromUsage(usage snapshot.Usage) *snapshotapi.UsageResponse {
return &snapshotapi.UsageResponse{
Inodes: usage.Inodes,
Size_: usage.Size,
}
}
// fromMounts packages the given mounts into a MountsResponse for the
// wire.
func fromMounts(mounts []mount.Mount) *snapshotapi.MountsResponse {
	apiMounts := make([]*mounttypes.Mount, len(mounts))
	for i := range mounts {
		apiMounts[i] = &mounttypes.Mount{
			Type:    mounts[i].Type,
			Source:  mounts[i].Source,
			Options: mounts[i].Options,
		}
	}
	return &snapshotapi.MountsResponse{
		Mounts: apiMounts,
	}
}

View File

@ -3,7 +3,7 @@ package snapshot
import ( import (
"context" "context"
"github.com/containerd/containerd" "github.com/containerd/containerd/mount"
) )
// Kind identifies the kind of snapshot. // Kind identifies the kind of snapshot.
@ -179,7 +179,7 @@ type Snapshotter interface {
// available only for active snapshots. // available only for active snapshots.
// //
// This can be used to recover mounts after calling View or Prepare. // This can be used to recover mounts after calling View or Prepare.
Mounts(ctx context.Context, key string) ([]containerd.Mount, error) Mounts(ctx context.Context, key string) ([]mount.Mount, error)
// Prepare creates an active snapshot identified by key descending from the // Prepare creates an active snapshot identified by key descending from the
// provided parent. The returned mounts can be used to mount the snapshot // provided parent. The returned mounts can be used to mount the snapshot
@ -195,7 +195,7 @@ type Snapshotter interface {
// one is done with the transaction, Remove should be called on the key. // one is done with the transaction, Remove should be called on the key.
// //
// Multiple calls to Prepare or View with the same key should fail. // Multiple calls to Prepare or View with the same key should fail.
Prepare(ctx context.Context, key, parent string) ([]containerd.Mount, error) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error)
// View behaves identically to Prepare except the result may not be // View behaves identically to Prepare except the result may not be
// committed back to the snapshot snapshotter. View returns a readonly view on // committed back to the snapshot snapshotter. View returns a readonly view on
@ -210,7 +210,7 @@ type Snapshotter interface {
// Commit may not be called on the provided key and will return an error. // Commit may not be called on the provided key and will return an error.
// To collect the resources associated with key, Remove must be called with // To collect the resources associated with key, Remove must be called with
// key as the argument. // key as the argument.
View(ctx context.Context, key, parent string) ([]containerd.Mount, error) View(ctx context.Context, key, parent string) ([]mount.Mount, error)
// Commit captures the changes between key and its parent into a snapshot // Commit captures the changes between key and its parent into a snapshot
// identified by name. The name can then be used with the snapshotter's other // identified by name. The name can then be used with the snapshotter's other

View File

@ -1,4 +1,4 @@
// +build linux,!arm64 // +build linux
package sys package sys

View File

@ -1,74 +0,0 @@
// +build linux,arm64
package sys
// #include <sys/epoll.h>
/*
int EpollCreate1(int flag) {
return epoll_create1(flag);
}
int EpollCtl(int efd, int op,int sfd, int events, int fd) {
struct epoll_event event;
event.events = events;
event.data.fd = fd;
return epoll_ctl(efd, op, sfd, &event);
}
struct event_t {
uint32_t events;
int fd;
};
struct epoll_event events[128];
int run_epoll_wait(int fd, struct event_t *event) {
int n, i;
n = epoll_wait(fd, events, 128, -1);
for (i = 0; i < n; i++) {
event[i].events = events[i].events;
event[i].fd = events[i].data.fd;
}
return n;
}
*/
import "C"
import (
"fmt"
"unsafe"
"golang.org/x/sys/unix"
)
// EpollCreate1 calls a C implementation
func EpollCreate1(flag int) (int, error) {
fd := int(C.EpollCreate1(C.int(flag)))
if fd < 0 {
return fd, fmt.Errorf("failed to create epoll, errno is %d", fd)
}
return fd, nil
}
// EpollCtl calls a C implementation
func EpollCtl(epfd int, op int, fd int, event *unix.EpollEvent) error {
errno := C.EpollCtl(C.int(epfd), C.int(unix.EPOLL_CTL_ADD), C.int(fd), C.int(event.Events), C.int(event.Fd))
if errno < 0 {
return fmt.Errorf("Failed to ctl epoll")
}
return nil
}
// EpollWait calls a C implementation
func EpollWait(epfd int, events []unix.EpollEvent, msec int) (int, error) {
var c_events [128]C.struct_event_t
n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events))))
if n < 0 {
return int(n), fmt.Errorf("Failed to wait epoll")
}
for i := 0; i < n; i++ {
events[i].Fd = int32(c_events[i].fd)
events[i].Events = uint32(c_events[i].events)
}
return int(n), nil
}

View File

@ -1,39 +0,0 @@
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
github.com/containerd/go-runc 5fe4d8cb7fdc0fae5f5a7f4f1d65a565032401b2
github.com/containerd/console a3863895279f5104533fd999c1babf80faffd98c
github.com/containerd/cgroups 7b2d1a0f50963678d5799e29d17a4d611f5a5dee
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
github.com/prometheus/client_golang v0.8.0
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
github.com/prometheus/procfs fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/docker/go-units v0.3.1
github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/opencontainers/runc 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
github.com/opencontainers/runtime-spec 035da1dca3dfbb00d752eb58b0b158d6129f3776
github.com/Sirupsen/logrus v0.11.0
github.com/containerd/btrfs e9c546f46bccffefe71a6bc137e4c21b5503cc18
github.com/stretchr/testify v1.1.4
github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
github.com/containerd/fifo 1c36a62ed52ac0235d524d6371b746db4e4eef72
github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca
golang.org/x/net 8b4af36cd21a1f85a7484b49feb7c79363106d8e
google.golang.org/grpc v1.0.5
github.com/pkg/errors v0.8.0
github.com/nightlyone/lockfile 1d49c987357a327b5b03aa84cbddd582c328615d
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
golang.org/x/sys f3918c30c5c2cb527c0b071a27c35120a6c0719a
github.com/opencontainers/image-spec a431dbcf6a74fca2e0e040b819a836dbe3fb23ca
github.com/containerd/continuity 6414d06cab9e2fe082ea29ff42aab627e740d00c
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio fff283ad5116362ca252298cfc9b95828956d85d
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.5.15
github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e

View File

@ -2,5 +2,7 @@ Aaron Lehmann <aaron.lehmann@docker.com>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Brandon Philips <brandon.philips@coreos.com> Brandon Philips <brandon.philips@coreos.com>
Derek McGowan <derek@mcgstyle.net> Derek McGowan <derek@mcgstyle.net>
Justin Cormack <justin.cormack@docker.com>
Justin Cummins <sul3n3t@gmail.com>
Stephen J Day <stephen.day@docker.com> Stephen J Day <stephen.day@docker.com>
Tonis Tiigi <tonistiigi@gmail.com> Tonis Tiigi <tonistiigi@gmail.com>

View File

@ -0,0 +1,17 @@
package sysx
const (
// AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
AtSymlinkNofollow = 0x200
)
const (
// SYS_FCHMODAT defined from golang.org/sys/unix
SYS_FCHMODAT = 490
)
// These functions will be generated by generate.sh
// $ GOOS=freebsd GOARCH=amd64 ./generate.sh chmod
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)

View File

@ -0,0 +1,25 @@
// mksyscall.pl chmod_freebsd.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View File

@ -0,0 +1,7 @@
package sysx
import (
"syscall"
)
const ENODATA = syscall.ENODATA

View File

@ -0,0 +1,9 @@
// +build darwin freebsd
package sysx
import (
"syscall"
)
const ENODATA = syscall.ENOATTR

View File

@ -0,0 +1,53 @@
package sysx
import (
"errors"
)
// Initial stub version for FreeBSD. FreeBSD has a different
// syscall API from Darwin and Linux for extended attributes;
// it is also not widely used. It is not exposed at all by the
// Go syscall package, so we need to implement directly eventually.
var unsupported error = errors.New("extended attributes unsupported on FreeBSD")
// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
return []string{}, nil
}
// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
return unsupported
}
// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
return unsupported
}
// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
return []byte{}, nil
}
// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
return []string{}, nil
}
// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
return unsupported
}
// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
return unsupported
}
// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
return []byte{}, nil
}

View File

@ -7,6 +7,9 @@ import "syscall"
// $ GOOS=linux GOARCH=amd64 ./generate.sh xattr // $ GOOS=linux GOARCH=amd64 ./generate.sh xattr
// $ GOOS=linux GOARCH=arm ./generate.sh xattr // $ GOOS=linux GOARCH=arm ./generate.sh xattr
// $ GOOS=linux GOARCH=arm64 ./generate.sh xattr // $ GOOS=linux GOARCH=arm64 ./generate.sh xattr
// $ GOOS=linux GOARCH=ppc64 ./generate.sh xattr
// $ GOOS=linux GOARCH=ppc64le ./generate.sh xattr
// $ GOOS=linux GOARCH=s390x ./generate.sh xattr
// Listxattr calls syscall listxattr and reads all content // Listxattr calls syscall listxattr and reads all content
// and returns a string array // and returns a string array

View File

@ -1,19 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx package sysx
import "github.com/pkg/errors" import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) { func llistxattr(path string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "llistxattr not implemented on ppc64") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) { func lremovexattr(path string, attr string) (err error) {
return errors.Wrap(ErrNotSupported, "lremovexattr not implemented on ppc64") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) { func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
return errors.Wrap(ErrNotSupported, "lsetxattr not implemented on ppc64") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "lgetxattr not implemented on ppc64") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }

View File

@ -1,19 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx package sysx
import "github.com/pkg/errors" import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) { func llistxattr(path string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "llistxattr not implemented on ppc64le") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) { func lremovexattr(path string, attr string) (err error) {
return errors.Wrap(ErrNotSupported, "lremovexattr not implemented on ppc64le") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) { func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
return errors.Wrap(ErrNotSupported, "lsetxattr not implemented on ppc64le") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "lgetxattr not implemented on ppc64le") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }

View File

@ -1,19 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx package sysx
import "github.com/pkg/errors" import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) { func llistxattr(path string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "llistxattr not implemented on s390x") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) { func lremovexattr(path string, attr string) (err error) {
return errors.Wrap(ErrNotSupported, "lremovexattr not implemented on s390x") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) { func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
return errors.Wrap(ErrNotSupported, "lsetxattr not implemented on s390x") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
return 0, errors.Wrap(ErrNotSupported, "lgetxattr not implemented on s390x") var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
} }

3
vendor/github.com/containerd/fifo/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,3 @@
language: go
go:
- 1.7.x

201
vendor/github.com/containerd/fifo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -75,7 +75,11 @@ func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
f.Close() select {
case <-f.opened:
default:
f.Close()
}
case <-f.opened: case <-f.opened:
case <-f.closed: case <-f.closed:
} }

View File

@ -36,7 +36,7 @@ func getHandle(fn string) (*handle, error) {
h := &handle{ h := &handle{
f: f, f: f,
name: fn, name: fn,
dev: stat.Dev, dev: uint64(stat.Dev),
ino: stat.Ino, ino: stat.Ino,
} }
@ -62,7 +62,7 @@ func (h *handle) Path() (string, error) {
if err := syscall.Stat(h.procPath(), &stat); err != nil { if err := syscall.Stat(h.procPath(), &stat); err != nil {
return "", errors.Wrapf(err, "path %v could not be statted", h.procPath()) return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
} }
if stat.Dev != h.dev || stat.Ino != h.ino { if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino) return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
} }
return h.procPath(), nil return h.procPath(), nil

View File

@ -23,7 +23,7 @@ func getHandle(fn string) (*handle, error) {
h := &handle{ h := &handle{
fn: fn, fn: fn,
dev: uint64(stat.Dev), dev: uint64(stat.Dev),
ino: stat.Ino, ino: uint64(stat.Ino),
} }
return h, nil return h, nil
@ -34,7 +34,7 @@ func (h *handle) Path() (string, error) {
if err := syscall.Stat(h.fn, &stat); err != nil { if err := syscall.Stat(h.fn, &stat); err != nil {
return "", errors.Wrapf(err, "path %v could not be statted", h.fn) return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
} }
if uint64(stat.Dev) != h.dev || stat.Ino != h.ino { if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino {
return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
} }
return h.fn, nil return h.fn, nil

View File

@ -1,5 +1,7 @@
### fifo ### fifo
[![Build Status](https://travis-ci.org/containerd/fifo.svg?branch=master)](https://travis-ci.org/containerd/fifo)
Go package for handling fifos in a sane way. Go package for handling fifos in a sane way.
``` ```
@ -27,4 +29,4 @@ func (f *fifo) Write(b []byte) (int, error)
// Close the fifo. Next reads/writes will error. This method can also be used // Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned and fifo was never opened. // before open(2) has returned and fifo was never opened.
func (f *fifo) Close() error func (f *fifo) Close() error
``` ```

155
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file
View File

@ -0,0 +1,155 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// DO NOT EDIT!
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 187 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
}

140
vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}

View File

@ -1,21 +0,0 @@
MIT
Copyright (C) 2016 Tõnis Tiigi <tonistiigi@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -7,7 +7,7 @@
// and between processes. // and between processes.
// //
// Incoming requests to a server should create a Context, and outgoing calls to // Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must // servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created // propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue. // using WithDeadline, WithTimeout, WithCancel, or WithValue.
// //
@ -16,14 +16,14 @@
// propagation: // propagation:
// //
// Do not store Contexts inside a struct type; instead, pass a Context // Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first // explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx: // parameter, typically named ctx:
// //
// func DoSomething(ctx context.Context, arg Arg) error { // func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ... // // ... use ctx ...
// } // }
// //
// Do not pass a nil Context, even if a function permits it. Pass context.TODO // Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use. // if you are unsure about which Context to use.
// //
// Use context Values only for request-scoped data that transits processes and // Use context Values only for request-scoped data that transits processes and
@ -44,13 +44,13 @@ import "time"
// Context's methods may be called by multiple goroutines simultaneously. // Context's methods may be called by multiple goroutines simultaneously.
type Context interface { type Context interface {
// Deadline returns the time when work done on behalf of this context // Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is // should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results. // set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool) Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this // Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can // context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value. // never be canceled. Successive calls to Done return the same value.
// //
// WithCancel arranges for Done to be closed when cancel is called; // WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline // WithDeadline arranges for Done to be closed when the deadline
@ -79,24 +79,24 @@ type Context interface {
// a Done channel for cancelation. // a Done channel for cancelation.
Done() <-chan struct{} Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns // Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the // Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined. // context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value. // After Done is closed, successive calls to Err return the same value.
Err() error Err() error
// Value returns the value associated with this context for key, or nil // Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with // if no value is associated with key. Successive calls to Value with
// the same key returns the same result. // the same key returns the same result.
// //
// Use context values only for request-scoped data that transits // Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to // processes and API boundaries, not for passing optional parameters to
// functions. // functions.
// //
// A key identifies a specific value in a Context. Functions that wish // A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global // to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and // variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality; // Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid // packages should define keys as an unexported type to avoid
// collisions. // collisions.
// //
@ -115,7 +115,7 @@ type Context interface {
// // This prevents collisions with keys defined in other packages. // // This prevents collisions with keys defined in other packages.
// type key int // type key int
// //
// // userKey is the key for user.User values in Contexts. It is // // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext // // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly. // // instead of using this key directly.
// var userKey key = 0 // var userKey key = 0
@ -134,14 +134,14 @@ type Context interface {
} }
// Background returns a non-nil, empty Context. It is never canceled, has no // Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function, // values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming // initialization, and tests, and as the top-level Context for incoming
// requests. // requests.
func Background() Context { func Background() Context {
return background return background
} }
// TODO returns a non-nil, empty Context. Code should use context.TODO when // TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the // it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context // surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine // parameter). TODO is recognized by static analysis tools that determine

View File

@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
} }
// WithDeadline returns a copy of the parent context with the deadline adjusted // WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d, // to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned // WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned // context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is // cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first. // closed, whichever happens first.

View File

@ -13,7 +13,7 @@ import (
"time" "time"
) )
// An emptyCtx is never canceled, has no values, and has no deadline. It is not // An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses. // struct{}, since vars of this type must have distinct addresses.
type emptyCtx int type emptyCtx int
@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) {
} }
// parentCancelCtx follows a chain of parent references until it finds a // parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this // *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent. // package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) { func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for { for {
@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) {
p.mu.Unlock() p.mu.Unlock()
} }
// A canceler is a context type that can be canceled directly. The // A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx. // implementations are *cancelCtx and *timerCtx.
type canceler interface { type canceler interface {
cancel(removeFromParent bool, err error) cancel(removeFromParent bool, err error)
Done() <-chan struct{} Done() <-chan struct{}
} }
// A cancelCtx can be canceled. When canceled, it also cancels any children // A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler. // that implement canceler.
type cancelCtx struct { type cancelCtx struct {
Context Context
@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
} }
// WithDeadline returns a copy of the parent context with the deadline adjusted // WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d, // to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned // WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned // context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is // cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first. // closed, whichever happens first.
@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
return c, func() { c.cancel(true, Canceled) } return c, func() { c.cancel(true, Canceled) }
} }
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then // implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel. // delegating to cancelCtx.cancel.
type timerCtx struct { type timerCtx struct {
*cancelCtx *cancelCtx
@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val} return &valueCtx{parent, key, val}
} }
// A valueCtx carries a key-value pair. It implements Value for that key and // A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context. // delegates all other calls to the embedded Context.
type valueCtx struct { type valueCtx struct {
Context Context

Some files were not shown because too many files have changed in this diff Show More