From 27de1a5862046c6e8293f7f242b679cb71127ccf Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 5 Aug 2019 23:55:51 -0700 Subject: [PATCH] Update containerd to 5222236c1b57d055362dbc413d042ab56427270a. Signed-off-by: Lantao Liu --- vendor.conf | 13 +- vendor/github.com/Microsoft/go-winio/file.go | 18 +- vendor/github.com/Microsoft/go-winio/go.mod | 9 + .../github.com/Microsoft/go-winio/hvsock.go | 305 ++++++++ .../Microsoft/go-winio/pkg/etw/etw.go | 13 +- .../Microsoft/go-winio/pkg/etw/eventdata.go | 6 + .../Microsoft/go-winio/pkg/etw/eventopt.go | 8 +- .../Microsoft/go-winio/pkg/etw/fieldopt.go | 12 + .../Microsoft/go-winio/pkg/etw/newprovider.go | 53 ++ .../pkg/etw/newprovider_unsupported.go | 12 + .../Microsoft/go-winio/pkg/etw/provider.go | 104 ++- .../Microsoft/go-winio/pkg/etw/wrapper_32.go | 51 ++ .../Microsoft/go-winio/pkg/etw/wrapper_64.go | 41 ++ .../go-winio/pkg/etw/zsyscall_windows.go | 30 +- .../Microsoft/go-winio/pkg/etwlogrus/hook.go | 47 +- .../Microsoft/go-winio/pkg/guid/guid.go | 191 ++++- .../github.com/Microsoft/go-winio/syscall.go | 2 +- .../Microsoft/go-winio/zsyscall_windows.go | 33 + vendor/github.com/containerd/cgroups/blkio.go | 1 + .../github.com/containerd/cgroups/cgroup.go | 3 + .../github.com/containerd/cgroups/memory.go | 4 + .../containerd/cgroups/metrics.pb.go | 654 ++++++++++++++---- .../containerd/cgroups/metrics.proto | 13 + vendor/github.com/containerd/cgroups/utils.go | 2 +- .../containerd/containerd/README.md | 2 +- .../api/services/diff/v1/diff.pb.go | 266 +++++-- .../api/services/diff/v1/diff.proto | 3 + .../introspection/v1/introspection.pb.go | 284 +++++++- .../introspection/v1/introspection.proto | 7 + .../archive/compression/compression.go | 2 +- .../containerd/archive/time_unix.go | 2 +- .../containerd/containerd/cio/io.go | 29 +- .../containerd/containerd/client.go | 139 +++- .../containerd/cmd/containerd/command/main.go | 3 +- .../cmd/containerd/command/main_windows.go | 2 +- .../cmd/containerd/command/publish.go | 2 +- .../cmd/containerd/command/service_windows.go | 29 +- .../containerd/containerd/container.go | 6 +- .../containerd/containerd/container_opts.go | 61 +- .../containerd/container_opts_unix.go | 15 +- .../containerd/containerd/content/helpers.go | 22 + .../containerd/defaults/defaults_unix.go | 2 + .../containerd/defaults/defaults_windows.go | 2 + .../github.com/containerd/containerd/diff.go | 7 +- .../containerd/containerd/diff/apply/apply.go | 86 +-- .../containerd/containerd/diff/diff.go | 13 +- .../containerd/containerd/diff/stream.go | 187 +++++ .../containerd/containerd/diff/stream_unix.go | 146 ++++ .../containerd/diff/stream_windows.go | 165 +++++ .../containerd/diff/walking/differ.go | 4 +- .../github.com/containerd/containerd/gc/gc.go | 7 + .../github.com/containerd/containerd/image.go | 42 +- .../containerd/images/archive/exporter.go | 32 +- .../containerd/images/archive/importer.go | 149 +++- .../containerd/images/archive/reference.go | 2 +- .../containerd/containerd/images/image.go | 77 ++- .../containerd/images/mediatypes.go | 2 + .../containerd/containerd/import.go | 17 +- .../containerd/containerd/metadata/content.go | 2 +- .../containerd/containerd/metadata/gc.go | 39 +- .../containerd/metadata/namespaces.go | 10 +- .../containerd/metadata/snapshot.go | 39 +- .../containerd/containerd/namespaces.go | 16 +- .../containerd/containerd/namespaces/store.go | 11 +- .../containerd/containerd/namespaces/ttrpc.go | 2 +- .../process.go => namespaces_opts_linux.go} | 34 +- .../containerd/containerd/oci/spec_opts.go | 50 
++ .../containerd/oci/spec_opts_linux.go | 63 ++ .../containerd/oci/spec_opts_unix.go | 63 ++ .../containerd/oci/spec_opts_windows.go | 5 + .../proc => pkg/process}/deleted_state.go | 5 +- .../v1/linux/proc => pkg/process}/exec.go | 26 +- .../linux/proc => pkg/process}/exec_state.go | 2 +- .../v1/linux/proc => pkg/process}/init.go | 33 +- .../linux/proc => pkg/process}/init_state.go | 20 +- .../v1/linux/proc => pkg/process}/io.go | 28 +- .../proc/proc.go => pkg/process/process.go} | 28 +- .../v1/linux/proc => pkg/process}/types.go | 2 +- .../v1/linux/proc => pkg/process}/utils.go | 55 +- .../containerd/pkg/stdio/platform.go | 33 + .../containerd/containerd/pkg/stdio/stdio.go | 30 + .../pkg/ttrpcutil/client_windows.go | 51 +- .../containerd/remotes/docker/authorizer.go | 299 ++++++-- .../containerd/remotes/docker/fetcher.go | 131 ++-- .../containerd/remotes/docker/handler.go | 42 ++ .../containerd/remotes/docker/pusher.go | 192 +++-- .../containerd/remotes/docker/registry.go | 202 ++++++ .../containerd/remotes/docker/resolver.go | 530 ++++++++------ .../remotes/docker/schema1/converter.go | 4 +- .../containerd/remotes/docker/scope.go | 45 +- .../containerd/containerd/remotes/handlers.go | 55 +- .../containerd/containerd/rootfs/apply.go | 26 +- .../containerd/containerd/runtime/task.go | 3 + .../containerd/runtime/v1/linux/runtime.go | 10 +- .../containerd/runtime/v1/linux/task.go | 13 +- .../runtime/v1/shim/client/client.go | 21 +- .../containerd/runtime/v1/shim/service.go | 44 +- .../containerd/runtime/v2/binary.go | 2 +- .../containerd/runtime/v2/manager.go | 7 + .../containerd/containerd/runtime/v2/shim.go | 10 +- .../runtime/v2/shim/shim_windows.go | 244 +------ .../containerd/runtime/v2/shim/util_unix.go | 16 + .../runtime/v2/shim/util_windows.go | 77 +-- .../containerd/services/diff/local.go | 3 + .../services/introspection/service.go | 60 +- .../services/server/config/config.go | 16 + .../containerd/services/server/server.go | 5 + .../containerd/services/tasks/local.go | 31 +- .../containerd/services/tasks/local_unix.go | 2 + .../services/tasks/local_windows.go | 35 + .../containerd/snapshots/snapshotter.go | 13 +- .../containerd/containerd/vendor.conf | 34 +- .../github.com/containerd/go-runc/README.md | 13 +- vendor/github.com/containerd/go-runc/runc.go | 12 +- vendor/github.com/containerd/ttrpc/channel.go | 5 +- vendor/github.com/containerd/ttrpc/client.go | 207 +++--- vendor/github.com/containerd/ttrpc/config.go | 15 +- .../containerd/ttrpc/interceptor.go | 50 ++ .../github.com/containerd/ttrpc/metadata.go | 55 +- vendor/github.com/containerd/ttrpc/server.go | 15 +- .../github.com/containerd/ttrpc/services.go | 14 +- vendor/github.com/containerd/ttrpc/types.go | 19 +- vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 +++ vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/hash.go | 53 ++ vendor/github.com/google/uuid/marshal.go | 37 + vendor/github.com/google/uuid/node.go | 90 +++ vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/sql.go | 59 ++ vendor/github.com/google/uuid/time.go | 123 ++++ vendor/github.com/google/uuid/util.go | 43 ++ vendor/github.com/google/uuid/uuid.go | 245 +++++++ vendor/github.com/google/uuid/version1.go | 44 ++ vendor/github.com/google/uuid/version4.go | 38 + .../runc/libcontainer/nsenter/nsexec.c | 89 ++- 
.../opencontainers/runc/vendor.conf | 4 +- 140 files changed, 6100 insertions(+), 1572 deletions(-) create mode 100644 vendor/github.com/Microsoft/go-winio/go.mod create mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream_unix.go create mode 100644 vendor/github.com/containerd/containerd/diff/stream_windows.go rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc/process.go => namespaces_opts_linux.go} (54%) create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_linux.go create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_unix.go rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/deleted_state.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec.go (91%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/exec_state.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init.go (95%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/init_state.go (96%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/io.go (89%) rename vendor/github.com/containerd/containerd/{runtime/proc/proc.go => pkg/process/process.go} (70%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/types.go (99%) rename vendor/github.com/containerd/containerd/{runtime/v1/linux/proc => pkg/process}/utils.go (70%) create mode 100644 vendor/github.com/containerd/containerd/pkg/stdio/platform.go create mode 100644 vendor/github.com/containerd/containerd/pkg/stdio/stdio.go create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/registry.go create mode 100644 vendor/github.com/containerd/containerd/services/tasks/local_windows.go create mode 100644 vendor/github.com/containerd/ttrpc/interceptor.go create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/go.mod create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go diff --git a/vendor.conf b/vendor.conf index 49da57b6e..dde1761b9 100644 --- a/vendor.conf +++ b/vendor.conf @@ -21,11 +21,12 @@ github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c 
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823 github.com/pkg/errors v0.8.1 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db -github.com/opencontainers/runc v1.0.0-rc8 +github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86 github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.1 +github.com/google/uuid v1.1.1 github.com/golang/protobuf v1.2.0 github.com/gogo/protobuf v1.2.1 github.com/gogo/googleapis v1.2.0 @@ -35,16 +36,16 @@ github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/coreos/go-systemd v14 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 -github.com/containerd/ttrpc a5bd8ce9e40bc7c065a11c6936f4d032ce6bfa2b -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a +github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4 -github.com/containerd/containerd 31afff294400b5a69bdb3ec387ecdf5bad57a038 +github.com/containerd/containerd 5222236c1b57d055362dbc413d042ab56427270a github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517 -github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac +github.com/Microsoft/go-winio v0.4.14 github.com/BurntSushi/toml v0.3.1 # kubernetes dependencies diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 4334ff1cb..0385e4108 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -16,6 +16,7 @@ import ( //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult type atomicBool int32 @@ -79,6 +80,7 @@ type win32File struct { wg sync.WaitGroup wgLock sync.RWMutex closing atomicBool + socket bool readDeadline deadlineHandler writeDeadline deadlineHandler } @@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { } func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return makeWin32File(h) + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. 
+ f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil } // closeHandle closes the resources associated with a Win32 handle @@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er if f.closing.isSet() { err = ErrFileClosed } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: cancelIoEx(f.handle, &c.o) @@ -265,6 +277,10 @@ func (f *win32File) Flush() error { return syscall.FlushFileBuffers(f.handle) } +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + func (d *deadlineHandler) set(deadline time.Time) error { d.setLock.Lock() defer d.setLock.Unlock() diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 000000000..b3846826b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.1 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b +) diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..dbfe790ee --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,305 @@ +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. 
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
+func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) shutdown(how int) error { + err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, notifying the other endpoint that +// no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. 
+func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go index a958b26cd..10cd08d84 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go @@ -7,9 +7,14 @@ // set of C macros. package etw -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go etw.go //sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister -//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister -//sys eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer -//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation + +//sys eventUnregister_64(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister +//sys eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer +//sys eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation + +//sys eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) = advapi32.EventUnregister +//sys eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer +//sys eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go index 79dc10a5f..a524d8920 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go @@ -3,6 +3,7 @@ package etw import ( "bytes" "encoding/binary" + "syscall" ) // eventData maintains a buffer which builds up the data for an ETW event. It @@ -63,3 +64,8 @@ func (ed *eventData) writeUint32(value uint32) { func (ed *eventData) writeUint64(value uint64) { binary.Write(&ed.buffer, binary.LittleEndian, value) } + +// writeFiletime appends a FILETIME to the buffer. 
+func (ed *eventData) writeFiletime(value syscall.Filetime) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go index a0a8176ff..fb6ac7dbc 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go @@ -6,8 +6,8 @@ import ( type eventOptions struct { descriptor *eventDescriptor - activityID *guid.GUID - relatedActivityID *guid.GUID + activityID guid.GUID + relatedActivityID guid.GUID tags uint32 } @@ -59,14 +59,14 @@ func WithTags(newTags uint32) EventOpt { } // WithActivityID specifies the activity ID of the event to be written. -func WithActivityID(activityID *guid.GUID) EventOpt { +func WithActivityID(activityID guid.GUID) EventOpt { return func(options *eventOptions) { options.activityID = activityID } } // WithRelatedActivityID specifies the parent activity ID of the event to be written. -func WithRelatedActivityID(activityID *guid.GUID) EventOpt { +func WithRelatedActivityID(activityID guid.GUID) EventOpt { return func(options *eventOptions) { options.relatedActivityID = activityID } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go index c29c6bd8f..d544a2998 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go @@ -4,6 +4,8 @@ import ( "fmt" "math" "reflect" + "syscall" + "time" "unsafe" ) @@ -380,6 +382,14 @@ func Struct(name string, opts ...FieldOpt) FieldOpt { } } +// Time adds a time to the event. +func Time(name string, value time.Time) FieldOpt { + return func(em *eventMetadata, ed *eventData) { + em.writeField(name, inTypeFileTime, outTypeDateTimeUTC, 0) + ed.writeFiletime(syscall.NsecToFiletime(value.UTC().UnixNano())) + } +} + // Currently, we support logging basic builtin types (int, string, etc), slices // of basic builtin types, error, types derived from the basic types (e.g. "type // foo int"), and structs (recursively logging their fields). We do not support @@ -454,6 +464,8 @@ func SmartField(name string, v interface{}) FieldOpt { return Float64Array(name, v) case error: return StringField(name, v.Error()) + case time.Time: + return Time(name, v) default: switch rv := reflect.ValueOf(v); rv.Kind() { case reflect.Bool: diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go new file mode 100644 index 000000000..f344fb65f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go @@ -0,0 +1,53 @@ +// +build amd64 arm64 386 + +package etw + +import ( + "bytes" + "encoding/binary" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +// NewProviderWithID creates and registers a new ETW provider, allowing the +// provider ID to be manually specified. This is most useful when there is an +// existing provider ID that must be used to conform to existing diagnostic +// infrastructure. 
+func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) { + providerCallbackOnce.Do(func() { + globalProviderCallback = windows.NewCallback(providerCallbackAdapter) + }) + + provider = providers.newProvider() + defer func(provider *Provider) { + if err != nil { + providers.removeProvider(provider) + } + }(provider) + provider.ID = id + provider.callback = callback + + if err := eventRegister((*windows.GUID)(&provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil { + return nil, err + } + + metadata := &bytes.Buffer{} + binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) + metadata.WriteString(name) + metadata.WriteByte(0) // Null terminator for name + binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer + provider.metadata = metadata.Bytes() + + if err := eventSetInformation( + provider.handle, + eventInfoClassProviderSetTraits, + uintptr(unsafe.Pointer(&provider.metadata[0])), + uint32(len(provider.metadata))); err != nil { + + return nil, err + } + + return provider, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go new file mode 100644 index 000000000..808455cc2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go @@ -0,0 +1,12 @@ +// +build arm + +package etw + +import ( + "github.com/Microsoft/go-winio/pkg/guid" +) + +// NewProviderWithID returns a nil provider on unsupported platforms. +func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) { + return nil, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go index 7a3c13dbf..236960182 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go @@ -1,12 +1,10 @@ package etw import ( - "bytes" "crypto/sha1" "encoding/binary" "strings" "unicode/utf16" - "unsafe" "github.com/Microsoft/go-winio/pkg/guid" "golang.org/x/sys/windows" @@ -16,7 +14,7 @@ import ( // name and ID (GUID), which should always have a 1:1 mapping to each other // (e.g. don't use multiple provider names with the same ID, or vice versa). type Provider struct { - ID *guid.GUID + ID guid.GUID handle providerHandle metadata []byte callback EnableCallback @@ -29,10 +27,14 @@ type Provider struct { // String returns the `provider`.ID as a string func (provider *Provider) String() string { + if provider == nil { + return "" + } + return provider.ID.String() } -type providerHandle windows.Handle +type providerHandle uint64 // ProviderState informs the provider EnableCallback what action is being // performed. @@ -59,9 +61,9 @@ const ( // EnableCallback is the form of the callback function that receives provider // enable/disable notifications from ETW. 
-type EnableCallback func(*guid.GUID, ProviderState, Level, uint64, uint64, uintptr) +type EnableCallback func(guid.GUID, ProviderState, Level, uint64, uint64, uintptr) -func providerCallback(sourceID *guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { +func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { provider := providers.getProvider(uint(i)) switch state { @@ -84,7 +86,7 @@ func providerCallback(sourceID *guid.GUID, state ProviderState, level Level, mat // different size, it has only pointer-sized arguments, which are then cast to // the appropriate types when calling providerCallback. func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr { - providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) + providerCallback(*sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) return 0 } @@ -92,26 +94,27 @@ func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, // uses the same algorithm as used by .NET's EventSource class, which is based // on RFC 4122. More information on the algorithm can be found here: // https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/ -// The algorithm is roughly: -// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be()) -// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122 -func providerIDFromName(name string) *guid.GUID { +// +// The algorithm is roughly the RFC 4122 algorithm for a V5 UUID, but differs in +// the following ways: +// - The input name is first upper-cased, UTF16-encoded, and converted to +// big-endian. +// - No variant is set on the result UUID. +// - The result UUID is treated as being in little-endian format, rather than +// big-endian. +func providerIDFromName(name string) guid.GUID { buffer := sha1.New() - - namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB} - buffer.Write(namespace) - + namespace := guid.GUID{0x482C2DB2, 0xC390, 0x47C8, [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}} + namespaceBytes := namespace.ToArray() + buffer.Write(namespaceBytes[:]) binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) sum := buffer.Sum(nil) sum[7] = (sum[7] & 0xf) | 0x50 - return &guid.GUID{ - Data1: binary.LittleEndian.Uint32(sum[0:4]), - Data2: binary.LittleEndian.Uint16(sum[4:6]), - Data3: binary.LittleEndian.Uint16(sum[6:8]), - Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]}, - } + a := [16]byte{} + copy(a[:], sum) + return guid.FromWindowsArray(a) } // NewProvider creates and registers a new ETW provider. The provider ID is @@ -120,49 +123,12 @@ func NewProvider(name string, callback EnableCallback) (provider *Provider, err return NewProviderWithID(name, providerIDFromName(name), callback) } -// NewProviderWithID creates and registers a new ETW provider, allowing the -// provider ID to be manually specified. This is most useful when there is an -// existing provider ID that must be used to conform to existing diagnostic -// infrastructure. 
-func NewProviderWithID(name string, id *guid.GUID, callback EnableCallback) (provider *Provider, err error) { - providerCallbackOnce.Do(func() { - globalProviderCallback = windows.NewCallback(providerCallbackAdapter) - }) - - provider = providers.newProvider() - defer func() { - if err != nil { - providers.removeProvider(provider) - } - }() - provider.ID = id - provider.callback = callback - - if err := eventRegister((*windows.GUID)(provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil { - return nil, err - } - - metadata := &bytes.Buffer{} - binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) - metadata.WriteString(name) - metadata.WriteByte(0) // Null terminator for name - binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer - provider.metadata = metadata.Bytes() - - if err := eventSetInformation( - provider.handle, - eventInfoClassProviderSetTraits, - uintptr(unsafe.Pointer(&provider.metadata[0])), - uint32(len(provider.metadata))); err != nil { - - return nil, err - } - - return provider, nil -} - // Close unregisters the provider. func (provider *Provider) Close() error { + if provider == nil { + return nil + } + providers.removeProvider(provider) return eventUnregister(provider.handle) } @@ -185,6 +151,10 @@ func (provider *Provider) IsEnabledForLevel(level Level) bool { // infrastructure, it can be useful to check if an event will actually be // consumed before doing expensive work to build the event data. func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool { + if provider == nil { + return false + } + if !provider.enabled { return false } @@ -206,6 +176,10 @@ func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uin // constructed based on the EventOpt and FieldOpt values that are passed as // opts. func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error { + if provider == nil { + return nil + } + options := eventOptions{descriptor: newEventDescriptor()} em := &eventMetadata{} ed := &eventData{} @@ -246,8 +220,8 @@ func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpt // the ETW infrastructure. 
func (provider *Provider) writeEventRaw( descriptor *eventDescriptor, - activityID *guid.GUID, - relatedActivityID *guid.GUID, + activityID guid.GUID, + relatedActivityID guid.GUID, metadataBlobs [][]byte, dataBlobs [][]byte) error { @@ -262,5 +236,5 @@ func (provider *Provider) writeEventRaw( dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) } - return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(activityID), (*windows.GUID)(relatedActivityID), dataDescriptorCount, &dataDescriptors[0]) + return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(&activityID), (*windows.GUID)(&relatedActivityID), dataDescriptorCount, &dataDescriptors[0]) } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go new file mode 100644 index 000000000..d0a7dac0c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go @@ -0,0 +1,51 @@ +// +build 386 arm + +package etw + +import ( + "golang.org/x/sys/windows" +) + +func low(v providerHandle) uint32 { + return uint32(v & 0xffffffff) +} + +func high(v providerHandle) uint32 { + return low(v >> 32) +} + +func eventUnregister(providerHandle providerHandle) (win32err error) { + return eventUnregister_32(low(providerHandle), high(providerHandle)) +} + +func eventWriteTransfer( + providerHandle providerHandle, + descriptor *eventDescriptor, + activityID *windows.GUID, + relatedActivityID *windows.GUID, + dataDescriptorCount uint32, + dataDescriptors *eventDataDescriptor) (win32err error) { + + return eventWriteTransfer_32( + low(providerHandle), + high(providerHandle), + descriptor, + activityID, + relatedActivityID, + dataDescriptorCount, + dataDescriptors) +} + +func eventSetInformation( + providerHandle providerHandle, + class eventInfoClass, + information uintptr, + length uint32) (win32err error) { + + return eventSetInformation_32( + low(providerHandle), + high(providerHandle), + class, + information, + length) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go new file mode 100644 index 000000000..ef8b599a6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go @@ -0,0 +1,41 @@ +// +build amd64 arm64 + +package etw + +import ( + "golang.org/x/sys/windows" +) + +func eventUnregister(providerHandle providerHandle) (win32err error) { + return eventUnregister_64(providerHandle) +} + +func eventWriteTransfer( + providerHandle providerHandle, + descriptor *eventDescriptor, + activityID *windows.GUID, + relatedActivityID *windows.GUID, + dataDescriptorCount uint32, + dataDescriptors *eventDataDescriptor) (win32err error) { + + return eventWriteTransfer_64( + providerHandle, + descriptor, + activityID, + relatedActivityID, + dataDescriptorCount, + dataDescriptors) +} + +func eventSetInformation( + providerHandle providerHandle, + class eventInfoClass, + information uintptr, + length uint32) (win32err error) { + + return eventSetInformation_64( + providerHandle, + class, + information, + length) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go index 802cd69dd..4e8a71922 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go @@ -53,7 +53,7 @@ func eventRegister(providerId *windows.GUID, 
callback uintptr, callbackContext u return } -func eventUnregister(providerHandle providerHandle) (win32err error) { +func eventUnregister_64(providerHandle providerHandle) (win32err error) { r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0) if r0 != 0 { win32err = syscall.Errno(r0) @@ -61,7 +61,7 @@ func eventUnregister(providerHandle providerHandle) (win32err error) { return } -func eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { +func eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors))) if r0 != 0 { win32err = syscall.Errno(r0) @@ -69,10 +69,34 @@ func eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescript return } -func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) { +func eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) { r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0) if r0 != 0 { win32err = syscall.Errno(r0) } return } + +func eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 2, uintptr(providerHandle_low), uintptr(providerHandle_high), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { + r0, _, _ := syscall.Syscall9(procEventWriteTransfer.Addr(), 7, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 5, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(class), uintptr(information), uintptr(length), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go index 3476f7a61..38228c39c 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go @@ -1,6 +1,8 @@ package etwlogrus import ( + "sort" + "github.com/Microsoft/go-winio/pkg/etw" "github.com/sirupsen/logrus" ) @@ -31,15 
+33,7 @@ func NewHookFromProvider(provider *etw.Provider) (*Hook, error) { // Levels returns the set of levels that this hook wants to receive log entries // for. func (h *Hook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.TraceLevel, - logrus.DebugLevel, - logrus.InfoLevel, - logrus.WarnLevel, - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } + return logrus.AllLevels } var logrusToETWLevelMap = map[logrus.Level]etw.Level{ @@ -62,19 +56,42 @@ func (h *Hook) Fire(e *logrus.Entry) error { return nil } - // Reserve extra space for the message field. - fields := make([]etw.FieldOpt, 0, len(e.Data)+1) + // Sort the fields by name so they are consistent in each instance + // of an event. Otherwise, the fields don't line up in WPA. + names := make([]string, 0, len(e.Data)) + hasError := false + for k := range e.Data { + if k == logrus.ErrorKey { + // Always put the error last because it is optional in some events. + hasError = true + } else { + names = append(names, k) + } + } + sort.Strings(names) + // Reserve extra space for the message and time fields. + fields := make([]etw.FieldOpt, 0, len(e.Data)+2) fields = append(fields, etw.StringField("Message", e.Message)) - - for k, v := range e.Data { - fields = append(fields, etw.SmartField(k, v)) + fields = append(fields, etw.Time("Time", e.Time)) + for _, k := range names { + fields = append(fields, etw.SmartField(k, e.Data[k])) + } + if hasError { + fields = append(fields, etw.SmartField(logrus.ErrorKey, e.Data[logrus.ErrorKey])) } - return h.provider.WriteEvent( + // Firing an ETW event is essentially best effort, as the event write can + // fail for reasons completely out of the control of the event writer (such + // as a session listening for the event having no available space in its + // buffers). Therefore, we don't return the error from WriteEvent, as it is + // just noise in many cases. + h.provider.WriteEvent( "LogrusEntry", etw.WithEventOpts(etw.WithLevel(level)), fields) + + return nil } // Close cleans up the hook and closes the ETW provider. If the provder was diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index cc50972da..586406577 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,19 +1,43 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding package guid import ( "crypto/rand" + "crypto/sha1" + "encoding" "encoding/binary" - "encoding/json" "fmt" "strconv" - "strings" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) -var _ = (json.Marshaler)(&GUID{}) -var _ = (json.Unmarshaler)(&GUID{}) +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. 
For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) // GUID represents a GUID/UUID. It has the same structure as // golang.org/x/sys/windows.GUID so that it can be used with functions expecting @@ -23,24 +47,83 @@ var _ = (json.Unmarshaler)(&GUID{}) type GUID windows.GUID // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. -func NewV4() (*GUID, error) { +func NewV4() (GUID, error) { var b [16]byte if _, err := rand.Read(b[:]); err != nil { - return nil, err + return GUID{}, err } - var g GUID - g.Data1 = binary.LittleEndian.Uint32(b[0:4]) - g.Data2 = binary.LittleEndian.Uint16(b[4:6]) - g.Data3 = binary.LittleEndian.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) - g.Data3 = (g.Data3 & 0x0fff) | 0x4000 // Version 4 (randomly generated) - g.Data4[0] = (g.Data4[0] & 0x3f) | 0x80 // RFC4122 variant - return &g, nil + return g, nil } -func (g *GUID) String() string { +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { return fmt.Sprintf( "%08x-%04x-%04x-%04x-%012x", g.Data1, @@ -53,58 +136,100 @@ func (g *GUID) String() string { // FromString parses a string containing a GUID and returns the GUID. The only // format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` // format. 
-func FromString(s string) (*GUID, error) { +func FromString(s string) (GUID, error) { if len(s) != 36 { - return nil, errors.New("invalid GUID format (length)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return nil, errors.New("invalid GUID format (dashes)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } var g GUID data1, err := strconv.ParseUint(s[0:8], 16, 32) if err != nil { - return nil, errors.Wrap(err, "invalid GUID format (Data1)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data1 = uint32(data1) data2, err := strconv.ParseUint(s[9:13], 16, 16) if err != nil { - return nil, errors.Wrap(err, "invalid GUID format (Data2)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data2 = uint16(data2) data3, err := strconv.ParseUint(s[14:18], 16, 16) if err != nil { - return nil, errors.Wrap(err, "invalid GUID format (Data3)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data3 = uint16(data3) for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { v, err := strconv.ParseUint(s[x:x+2], 16, 8) if err != nil { - return nil, errors.Wrap(err, "invalid GUID format (Data4)") + return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data4[i] = uint8(v) } - return &g, nil + return g, nil } -// MarshalJSON marshals the GUID to JSON representation and returns it as a -// slice of bytes. -func (g *GUID) MarshalJSON() ([]byte, error) { - return json.Marshal(g.String()) +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d } -// UnmarshalJSON unmarshals a GUID from JSON representation and sets itself to -// the unmarshaled GUID. -func (g *GUID) UnmarshalJSON(data []byte) error { - g2, err := FromString(strings.Trim(string(data), "\"")) +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. 
+func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) if err != nil { return err } - *g = *g2 + *g = g2 return nil } diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 20d64cf41..5cb52bc74 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,3 @@ package winio -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 0d15441d1..e26b01faf 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -38,6 +38,7 @@ func errnoErr(e syscall.Errno) error { var ( modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") @@ -45,6 +46,7 @@ var ( procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreateFileW = modkernel32.NewProc("CreateFileW") @@ -73,6 +75,7 @@ var ( procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procbind = modws2_32.NewProc("bind") ) func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { @@ -124,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { @@ -527,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p } return } + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go index 875fb5546..7c498def6 100644 --- 
a/vendor/github.com/containerd/cgroups/blkio.go +++ b/vendor/github.com/containerd/cgroups/blkio.go @@ -86,6 +86,7 @@ func (b *blkioController) Stat(path string, stats *Metrics) error { } // Try to read CFQ stats available on all CFQ enabled kernels first if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { + settings = []blkioStatSettings{} settings = append(settings, blkioStatSettings{ name: "sectors_recursive", diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go index e3ef07651..53866685b 100644 --- a/vendor/github.com/containerd/cgroups/cgroup.go +++ b/vendor/github.com/containerd/cgroups/cgroup.go @@ -497,6 +497,9 @@ func (c *cgroup) MoveTo(destination Cgroup) error { } for _, p := range processes { if err := destination.Add(p); err != nil { + if strings.Contains(err.Error(), "no such process") { + continue + } return err } } diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go index ce15ca2b9..b8b5fe7ea 100644 --- a/vendor/github.com/containerd/cgroups/memory.go +++ b/vendor/github.com/containerd/cgroups/memory.go @@ -281,6 +281,10 @@ func getMemorySettings(resources *specs.LinuxResources) []memorySettings { name: "limit_in_bytes", value: mem.Limit, }, + { + name: "soft_limit_in_bytes", + value: mem.Reservation, + }, { name: "memsw.limit_in_bytes", value: mem.Swap, diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go index 6043a8f7d..7dd7f6f3c 100644 --- a/vendor/github.com/containerd/cgroups/metrics.pb.go +++ b/vendor/github.com/containerd/cgroups/metrics.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/cgroups/metrics.proto -// DO NOT EDIT! /* Package cgroups is a generated protocol buffer package. 
@@ -21,6 +20,7 @@ BlkIOEntry RdmaStat RdmaEntry + NetworkStat */ package cgroups @@ -52,6 +52,7 @@ type Metrics struct { Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"` Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"` Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network" json:"network,omitempty"` } func (m *Metrics) Reset() { *m = Metrics{} } @@ -209,6 +210,22 @@ func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } func (*RdmaEntry) ProtoMessage() {} func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} } +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{12} } + func init() { proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") @@ -222,6 +239,7 @@ func init() { proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") } func (m *Metrics) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -300,6 +318,18 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { } i += n5 } + if len(m.Network) > 0 { + for _, msg := range m.Network { + dAtA[i] = 0x3a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -389,21 +419,21 @@ func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n5, err := m.Usage.MarshalTo(dAtA[i:]) + n6, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 } if m.Throttling != nil { dAtA[i] = 0x12 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) - n6, err := m.Throttling.MarshalTo(dAtA[i:]) + n7, err := m.Throttling.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } return i, nil } @@ -439,21 +469,21 @@ func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMetrics(dAtA, i, uint64(m.User)) } if len(m.PerCPU) > 0 { - dAtA8 := make([]byte, len(m.PerCPU)*10) - var j7 int + dAtA9 := make([]byte, 
len(m.PerCPU)*10) + var j8 int for _, num := range m.PerCPU { for num >= 1<<7 { - dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80) + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j7++ + j8++ } - dAtA8[j7] = uint8(num) - j7++ + dAtA9[j8] = uint8(num) + j8++ } dAtA[i] = 0x22 i++ - i = encodeVarintMetrics(dAtA, i, uint64(j7)) - i += copy(dAtA[i:], dAtA8[:j7]) + i = encodeVarintMetrics(dAtA, i, uint64(j8)) + i += copy(dAtA[i:], dAtA9[:j8]) } return i, nil } @@ -706,11 +736,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n9, err := m.Usage.MarshalTo(dAtA[i:]) + n10, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } if m.Swap != nil { dAtA[i] = 0x92 @@ -718,11 +748,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size())) - n10, err := m.Swap.MarshalTo(dAtA[i:]) + n11, err := m.Swap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.Kernel != nil { dAtA[i] = 0x9a @@ -730,11 +760,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) - n11, err := m.Kernel.MarshalTo(dAtA[i:]) + n12, err := m.Kernel.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.KernelTCP != nil { dAtA[i] = 0xa2 @@ -742,11 +772,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) - n12, err := m.KernelTCP.MarshalTo(dAtA[i:]) + n13, err := m.KernelTCP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } return i, nil } @@ -766,7 +796,6 @@ func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Limit != 0 { dAtA[i] = 0x8 i++ @@ -1025,24 +1054,70 @@ func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 +func (m *NetworkStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.RxBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + } + if 
m.TxPackets != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + } + return i, nil } + func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1081,6 +1156,12 @@ func (m *Metrics) Size() (n int) { l = m.Rdma.Size() n += 1 + l + sovMetrics(uint64(l)) } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } return n } @@ -1413,6 +1494,40 @@ func (m *RdmaEntry) Size() (n int) { return n } +func (m *NetworkStat) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + return n +} + func sovMetrics(x uint64) (n int) { for { n++ @@ -1437,6 +1552,7 @@ func (this *Metrics) String() string { `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`, `}`, }, "") return s @@ -1613,6 +1729,24 @@ func (this *RdmaEntry) String() string { }, "") return s } +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `}`, + }, "") + return s +} func valueToStringMetrics(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1624,7 +1758,6 @@ func valueToStringMetrics(v interface{}) string { func (m *Metrics) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 - for iNdEx < l { preIndex := iNdEx var wire uint64 @@ -1847,6 +1980,37 @@ func (m *Metrics) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex 
:= iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -4092,7 +4256,237 @@ func (m *RdmaEntry) Unmarshal(dAtA []byte) error { } return nil } +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipMetrics(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -4201,88 +4595,102 @@ var ( func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) } var fileDescriptorMetrics = []byte{ - // 1325 bytes of a gzipped FileDescriptorProto + // 1549 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, - 0x16, 0x8d, 0xac, 0xb1, 0x3e, 0xae, 0x6c, 0xc7, 0xa6, 0x13, 0x67, 0xec, 0x97, 0x27, 0x29, 0xb2, - 0xfd, 0x9e, 0x5b, 0x03, 0x32, 0x9a, 0x02, 0x41, 0x93, 0xa6, 0x28, 0x22, 0xb7, 0x41, 0x83, 0xd6, - 0x88, 0x32, 0xb2, 0x91, 0x76, 0x35, 0x18, 0x8d, 0x98, 0x31, 0xe3, 0xd1, 0x70, 0xc2, 0xe1, 0xc8, - 0x71, 0x57, 0xdd, 0xf5, 0x37, 0xf5, 0x1f, 0x64, 0xd9, 0x4d, 0x81, 0x76, 0x63, 0x34, 0xfa, 0x25, - 0x05, 0x2f, 0xe7, 0x4b, 0x49, 0xdc, 0x40, 0xbb, 0xb9, 0xbc, 0xe7, 0x1c, 0x5e, 0x5e, 0x1e, 0x8a, - 0x14, 0xec, 0x7b, 0x4c, 0x9e, 0xc6, 0xc3, 0xae, 0xcb, 0xc7, 0x07, 0x2e, 0x0f, 0xa4, 0xc3, 0x02, - 0x2a, 0x46, 0x07, 0xae, 0x27, 0x78, 0x1c, 0x46, 0x07, 0x63, 0x2a, 0x05, 0x73, 0xa3, 0x6e, 0x28, - 0xb8, 0xe4, 0xc4, 0x64, 0xbc, 0x9b, 0x83, 0xba, 0x09, 0xa8, 0x3b, 0xf9, 0x6c, 0xeb, 0x86, 0xc7, - 0x3d, 0x8e, 0xa0, 0x03, 0xf5, 0xa5, 0xf1, 0x9d, 0xdf, 0x16, 0xa0, 0x7a, 0xa4, 0x15, 0xc8, 0xd7, - 0x50, 0x3d, 0x8d, 0x3d, 0x2a, 0xfd, 0xa1, 0x59, 0x6a, 0x97, 0xf7, 0x1a, 0x77, 0x77, 0xbb, 0x57, - 0xa9, 0x75, 0xbf, 0xd3, 0xc0, 0x81, 0x74, 0xa4, 0x95, 0xb2, 0xc8, 0x3d, 0x30, 0x42, 0x36, 0x8a, - 0xcc, 0x85, 0x76, 0x69, 0xaf, 0x71, 0xb7, 0x73, 0x35, 0xbb, 0xcf, 0x46, 0x11, 0x52, 0x11, 0x4f, - 0x1e, 0x42, 0xd9, 0x0d, 0x63, 0xb3, 0x8c, 0xb4, 0x3b, 0x57, 0xd3, 0x0e, 0xfb, 0x27, 0x8a, 0xd5, - 0xab, 0x4e, 0x2f, 0x5b, 0xe5, 0xc3, 0xfe, 0x89, 0xa5, 0x68, 0xe4, 0x21, 0x54, 0xc6, 0x74, 0xcc, - 0xc5, 0x85, 0x69, 0xa0, 0xc0, 0xce, 0xd5, 0x02, 0x47, 0x88, 0xc3, 0x99, 0x13, 0x0e, 0xb9, 0x0f, - 0x8b, 0x43, 0xff, 0x8c, 0x71, 0x73, 0x11, 0xc9, 0xdb, 0x57, 0x93, 0x7b, 0xfe, 0xd9, 0x93, 0xa7, - 0xc8, 0xd5, 0x8c, 
0xce, 0x19, 0x34, 0x0a, 0x6d, 0x20, 0x37, 0x60, 0x31, 0x8e, 0x1c, 0x8f, 0x9a, - 0xa5, 0x76, 0x69, 0xcf, 0xb0, 0x74, 0x40, 0x56, 0xa1, 0x3c, 0x76, 0x5e, 0x63, 0x4b, 0x0c, 0x4b, - 0x7d, 0x12, 0x13, 0xaa, 0x2f, 0x1c, 0xe6, 0xbb, 0x81, 0xc4, 0x15, 0x1b, 0x56, 0x1a, 0x92, 0x2d, - 0xa8, 0x85, 0x8e, 0x47, 0x23, 0xf6, 0x33, 0xc5, 0xb5, 0xd4, 0xad, 0x2c, 0xee, 0x3c, 0x80, 0x5a, - 0xda, 0x35, 0xa5, 0xe0, 0xc6, 0x42, 0xd0, 0x40, 0x26, 0x73, 0xa5, 0xa1, 0xaa, 0xc1, 0x67, 0x63, - 0x26, 0x93, 0xf9, 0x74, 0xd0, 0xf9, 0xb5, 0x04, 0xd5, 0xa4, 0x77, 0xe4, 0x8b, 0x62, 0x95, 0xff, - 0xba, 0x49, 0x87, 0xfd, 0x93, 0x13, 0x85, 0x4c, 0x57, 0xd2, 0x03, 0x90, 0xa7, 0x82, 0x4b, 0xe9, - 0xb3, 0xc0, 0xfb, 0xf8, 0x1e, 0x1f, 0x6b, 0x2c, 0xb5, 0x0a, 0xac, 0xce, 0x2b, 0xa8, 0xa5, 0xb2, - 0xaa, 0x56, 0xc9, 0xa5, 0xe3, 0xa7, 0xfd, 0xc2, 0x80, 0x6c, 0x40, 0xe5, 0x8c, 0x8a, 0x80, 0xfa, - 0xc9, 0x12, 0x92, 0x88, 0x10, 0x30, 0xe2, 0x88, 0x8a, 0xa4, 0x65, 0xf8, 0x4d, 0xb6, 0xa1, 0x1a, - 0x52, 0x61, 0x2b, 0xef, 0x18, 0xed, 0xf2, 0x9e, 0xd1, 0x83, 0xe9, 0x65, 0xab, 0xd2, 0xa7, 0x42, - 0x79, 0xa3, 0x12, 0x52, 0x71, 0x18, 0xc6, 0x9d, 0xd7, 0x50, 0x4b, 0x4b, 0x51, 0x8d, 0x0b, 0xa9, - 0x60, 0x7c, 0x14, 0xa5, 0x8d, 0x4b, 0x42, 0xb2, 0x0f, 0x6b, 0x49, 0x99, 0x74, 0x64, 0xa7, 0x18, - 0x5d, 0xc1, 0x6a, 0x96, 0xe8, 0x27, 0xe0, 0x5d, 0x58, 0xc9, 0xc1, 0x92, 0x8d, 0x69, 0x52, 0xd5, - 0x72, 0x36, 0x7a, 0xcc, 0xc6, 0xb4, 0xf3, 0x57, 0x03, 0x20, 0x77, 0x9c, 0x5a, 0xaf, 0xeb, 0xb8, - 0xa7, 0x99, 0x3f, 0x30, 0x20, 0x9b, 0x50, 0x16, 0x51, 0x32, 0x95, 0x36, 0xb6, 0x35, 0x18, 0x58, - 0x6a, 0x8c, 0xfc, 0x0f, 0x6a, 0x22, 0x8a, 0x6c, 0x75, 0xba, 0xf4, 0x04, 0xbd, 0xc6, 0xf4, 0xb2, - 0x55, 0xb5, 0x06, 0x03, 0x65, 0x3b, 0xab, 0x2a, 0xa2, 0x48, 0x7d, 0x90, 0x16, 0x34, 0xc6, 0x4e, - 0x18, 0xd2, 0x91, 0xfd, 0x82, 0xf9, 0xda, 0x39, 0x86, 0x05, 0x7a, 0xe8, 0x31, 0xf3, 0xb1, 0xd3, - 0x23, 0x26, 0xe4, 0x05, 0x7a, 0xdc, 0xb0, 0x74, 0x40, 0x6e, 0x43, 0xfd, 0x5c, 0x30, 0x49, 0x87, - 0x8e, 0x7b, 0x66, 0x56, 0x30, 0x93, 0x0f, 0x10, 0x13, 0x6a, 0xa1, 0x67, 0x87, 0x9e, 0xcd, 0x02, - 0xb3, 0xaa, 0x77, 0x22, 0xf4, 0xfa, 0xde, 0x93, 0x80, 0x6c, 0x41, 0x5d, 0x67, 0x78, 0x2c, 0xcd, - 0x5a, 0xd2, 0x46, 0xaf, 0xef, 0x3d, 0x8d, 0x25, 0xd9, 0x44, 0xd6, 0x0b, 0x27, 0xf6, 0xa5, 0x59, - 0x4f, 0x53, 0x8f, 0x55, 0x48, 0xda, 0xb0, 0x14, 0x7a, 0xf6, 0xd8, 0x79, 0x99, 0xa4, 0x41, 0x97, - 0x19, 0x7a, 0x47, 0xce, 0x4b, 0x8d, 0xd8, 0x86, 0x65, 0x16, 0x38, 0xae, 0x64, 0x13, 0x6a, 0x3b, - 0x01, 0x0f, 0xcc, 0x06, 0x42, 0x96, 0xd2, 0xc1, 0x47, 0x01, 0x0f, 0xd4, 0x62, 0x8b, 0x90, 0x25, - 0xad, 0x52, 0x00, 0x14, 0x55, 0xb0, 0x1f, 0xcb, 0xb3, 0x2a, 0xd8, 0x91, 0x5c, 0x05, 0x21, 0x2b, - 0x45, 0x15, 0x04, 0xb4, 0xa1, 0x11, 0x07, 0x74, 0xc2, 0x5c, 0xe9, 0x0c, 0x7d, 0x6a, 0x5e, 0x47, - 0x40, 0x71, 0x88, 0x3c, 0x80, 0xcd, 0x53, 0x46, 0x85, 0x23, 0xdc, 0x53, 0xe6, 0x3a, 0xbe, 0xad, - 0x7f, 0x4f, 0x6c, 0x7d, 0xfc, 0x56, 0x11, 0x7f, 0xab, 0x08, 0xd0, 0x4e, 0xf8, 0x41, 0xa5, 0xc9, - 0x3d, 0x98, 0x49, 0xd9, 0xd1, 0xb9, 0x13, 0x26, 0xcc, 0x35, 0x64, 0xde, 0x2c, 0xa6, 0x07, 0xe7, - 0x4e, 0xa8, 0x79, 0x2d, 0x68, 0xe0, 0x29, 0xb1, 0xb5, 0x91, 0x88, 0x2e, 0x1b, 0x87, 0x0e, 0xd1, - 0x4d, 0x9f, 0x40, 0x5d, 0x03, 0x94, 0xa7, 0xd6, 0xd1, 0x33, 0x4b, 0xd3, 0xcb, 0x56, 0xed, 0x58, - 0x0d, 0x2a, 0x63, 0xd5, 0x30, 0x6d, 0x45, 0x11, 0xb9, 0x07, 0x2b, 0x19, 0x54, 0x7b, 0xec, 0x06, - 0xe2, 0x57, 0xa7, 0x97, 0xad, 0xa5, 0x14, 0x8f, 0x46, 0x5b, 0x4a, 0x39, 0xe8, 0xb6, 0x4f, 0x61, - 0x4d, 0xf3, 0x8a, 0x9e, 0xbb, 0x89, 0x95, 0x5c, 0xc7, 0xc4, 0x51, 0x6e, 0xbc, 0xac, 0x5e, 0x6d, - 0xbf, 0x8d, 0x42, 0xbd, 0xdf, 0xa0, 0x07, 
0xff, 0x0f, 0x9a, 0x63, 0xe7, 0x4e, 0xbc, 0x85, 0x20, - 0x5d, 0xdb, 0xf3, 0xcc, 0x8e, 0xdb, 0x69, 0xb5, 0x99, 0x29, 0x4d, 0xbd, 0x25, 0x38, 0xda, 0xd7, - 0xce, 0xdc, 0x4d, 0xd5, 0x72, 0x7f, 0x6e, 0xea, 0xcd, 0xcf, 0x50, 0xca, 0xa4, 0x3b, 0x05, 0x2d, - 0xed, 0xc5, 0xad, 0x19, 0x94, 0x76, 0xe3, 0x3e, 0x90, 0x0c, 0x95, 0xbb, 0xf6, 0x3f, 0x85, 0x85, - 0xf6, 0x73, 0xeb, 0x76, 0x61, 0x5d, 0x83, 0x67, 0x0d, 0x7c, 0x1b, 0xd1, 0xba, 0x5f, 0x4f, 0x8a, - 0x2e, 0xce, 0x9a, 0x58, 0x44, 0xff, 0xb7, 0xa0, 0xfd, 0x28, 0xc7, 0xbe, 0xaf, 0x8d, 0x2d, 0x6f, - 0x7e, 0x40, 0x1b, 0x9b, 0xfe, 0xae, 0x36, 0xa2, 0x5b, 0xef, 0x69, 0x23, 0x76, 0x3f, 0xc5, 0x16, - 0xcd, 0xde, 0x4e, 0x7e, 0xf6, 0x54, 0xe2, 0xa4, 0xe0, 0xf8, 0x2f, 0xd3, 0xab, 0xe3, 0x0e, 0xfe, - 0xf6, 0xef, 0x7e, 0xec, 0x9e, 0xfd, 0x36, 0x90, 0xe2, 0x22, 0xbd, 0x3d, 0xee, 0x83, 0xa1, 0x5c, - 0x6e, 0x76, 0xe6, 0xe1, 0x22, 0x85, 0x7c, 0x95, 0x5d, 0x09, 0xdb, 0xf3, 0x90, 0xd3, 0x9b, 0x63, - 0x00, 0xa0, 0xbf, 0x6c, 0xe9, 0x86, 0xe6, 0xce, 0x1c, 0x12, 0xbd, 0xe5, 0xe9, 0x65, 0xab, 0xfe, - 0x3d, 0x92, 0x8f, 0x0f, 0xfb, 0x56, 0x5d, 0xeb, 0x1c, 0xbb, 0x61, 0x87, 0x42, 0xa3, 0x00, 0xcc, - 0xef, 0xdd, 0x52, 0xe1, 0xde, 0xcd, 0x5f, 0x04, 0x0b, 0x1f, 0x78, 0x11, 0x94, 0x3f, 0xf8, 0x22, - 0x30, 0x66, 0x5e, 0x04, 0x9d, 0x3f, 0x16, 0xa1, 0x9e, 0xbd, 0x3b, 0x88, 0x03, 0x5b, 0x8c, 0xdb, - 0x11, 0x15, 0x13, 0xe6, 0x52, 0x7b, 0x78, 0x21, 0x69, 0x64, 0x0b, 0xea, 0xc6, 0x22, 0x62, 0x13, - 0x9a, 0xbc, 0xd9, 0x76, 0x3e, 0xf2, 0x80, 0xd1, 0xbd, 0xb9, 0xc5, 0xf8, 0x40, 0xcb, 0xf4, 0x94, - 0x8a, 0x95, 0x8a, 0x90, 0x1f, 0xe1, 0x66, 0x3e, 0xc5, 0xa8, 0xa0, 0xbe, 0x30, 0x87, 0xfa, 0x7a, - 0xa6, 0x3e, 0xca, 0x95, 0x8f, 0x61, 0x9d, 0x71, 0xfb, 0x55, 0x4c, 0xe3, 0x19, 0xdd, 0xf2, 0x1c, - 0xba, 0x6b, 0x8c, 0x3f, 0x43, 0x7e, 0xae, 0x6a, 0xc3, 0x66, 0xa1, 0x25, 0xea, 0x2e, 0x2e, 0x68, - 0x1b, 0x73, 0x68, 0x6f, 0x64, 0x35, 0xab, 0xbb, 0x3b, 0x9f, 0xe0, 0x27, 0xd8, 0x60, 0xdc, 0x3e, - 0x77, 0x98, 0x7c, 0x57, 0x7d, 0x71, 0xbe, 0x8e, 0x3c, 0x77, 0x98, 0x9c, 0x95, 0xd6, 0x1d, 0x19, - 0x53, 0xe1, 0xcd, 0x74, 0xa4, 0x32, 0x5f, 0x47, 0x8e, 0x90, 0x9f, 0xab, 0xf6, 0x61, 0x8d, 0xf1, - 0x77, 0x6b, 0xad, 0xce, 0xa1, 0x79, 0x9d, 0xf1, 0xd9, 0x3a, 0x9f, 0xc1, 0x5a, 0x44, 0x5d, 0xc9, - 0x45, 0xd1, 0x6d, 0xb5, 0x39, 0x14, 0x57, 0x13, 0x7a, 0x26, 0xd9, 0x99, 0x00, 0xe4, 0x79, 0xb2, - 0x02, 0x0b, 0x3c, 0xc4, 0xa3, 0x53, 0xb7, 0x16, 0x78, 0xa8, 0xde, 0x80, 0x23, 0xf5, 0xb3, 0xa3, - 0x0f, 0x4e, 0xdd, 0x4a, 0x22, 0x75, 0x9e, 0xc6, 0xce, 0x4b, 0x9e, 0x3e, 0x02, 0x75, 0x80, 0xa3, - 0x2c, 0xe0, 0x22, 0x39, 0x3b, 0x3a, 0x50, 0xa3, 0x13, 0xc7, 0x8f, 0x69, 0xfa, 0xe6, 0xc1, 0xa0, - 0x67, 0xbe, 0x79, 0xdb, 0xbc, 0xf6, 0xe7, 0xdb, 0xe6, 0xb5, 0x5f, 0xa6, 0xcd, 0xd2, 0x9b, 0x69, - 0xb3, 0xf4, 0xfb, 0xb4, 0x59, 0xfa, 0x7b, 0xda, 0x2c, 0x0d, 0x2b, 0xf8, 0x7f, 0xe8, 0xf3, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x21, 0x0b, 0xcd, 0x6e, 0x0d, 0x00, 0x00, + 0x16, 0x8d, 0x2c, 0xd9, 0xd2, 0x5c, 0xd9, 0x8e, 0x4d, 0x27, 0xce, 0xd8, 0x49, 0x2c, 0x47, 0xb6, + 0xdf, 0xf3, 0x7b, 0x06, 0x64, 0xbc, 0x3c, 0x20, 0x68, 0xd2, 0x04, 0x45, 0xe4, 0x24, 0x48, 0xd0, + 0xba, 0x51, 0x46, 0x36, 0xd2, 0xae, 0x06, 0xd4, 0x88, 0x19, 0xd1, 0x96, 0x86, 0x13, 0x0e, 0xc7, + 0x96, 0xbb, 0xea, 0xa2, 0x40, 0x57, 0xfd, 0x33, 0xfd, 0x15, 0x59, 0x76, 0x53, 0xa0, 0xdd, 0x18, + 0x8d, 0x7e, 0x49, 0x41, 0x72, 0x3e, 0xa8, 0x24, 0x8e, 0xab, 0xdd, 0x90, 0x3c, 0xe7, 0xdc, 0xcb, + 0x3b, 0x87, 0xc3, 0x3b, 0xb0, 0xe3, 0x53, 0xd1, 0x8b, 0x3b, 0x0d, 0x8f, 0x0d, 0x76, 0x3d, 0x16, + 0x08, 0x4c, 0x03, 0xc2, 0xbb, 0xbb, 0x9e, 0xcf, 0x59, 0x1c, 0x46, 0xbb, 0x03, 0x22, 
0x38, 0xf5, + 0xa2, 0x46, 0xc8, 0x99, 0x60, 0xc8, 0xa6, 0xac, 0x91, 0x83, 0x1a, 0x09, 0xa8, 0x71, 0xf2, 0xbf, + 0xd5, 0x6b, 0x3e, 0xf3, 0x99, 0x02, 0xed, 0xca, 0x27, 0x8d, 0xaf, 0xff, 0x5a, 0x84, 0xf2, 0xbe, + 0x56, 0x40, 0x5f, 0x41, 0xb9, 0x17, 0xfb, 0x44, 0xf4, 0x3b, 0x76, 0x61, 0xbd, 0xb8, 0x5d, 0xbd, + 0xbb, 0xd5, 0xb8, 0x48, 0xad, 0xf1, 0x5c, 0x03, 0xdb, 0x02, 0x0b, 0x27, 0x65, 0xa1, 0x7b, 0x50, + 0x0a, 0x69, 0x37, 0xb2, 0xa7, 0xd6, 0x0b, 0xdb, 0xd5, 0xbb, 0xf5, 0x8b, 0xd9, 0x2d, 0xda, 0x8d, + 0x14, 0x55, 0xe1, 0xd1, 0x43, 0x28, 0x7a, 0x61, 0x6c, 0x17, 0x15, 0xed, 0xce, 0xc5, 0xb4, 0xbd, + 0xd6, 0xa1, 0x64, 0x35, 0xcb, 0xa3, 0xf3, 0x5a, 0x71, 0xaf, 0x75, 0xe8, 0x48, 0x1a, 0x7a, 0x08, + 0x33, 0x03, 0x32, 0x60, 0xfc, 0xcc, 0x2e, 0x29, 0x81, 0xcd, 0x8b, 0x05, 0xf6, 0x15, 0x4e, 0x45, + 0x4e, 0x38, 0xe8, 0x3e, 0x4c, 0x77, 0xfa, 0xc7, 0x94, 0xd9, 0xd3, 0x8a, 0xbc, 0x71, 0x31, 0xb9, + 0xd9, 0x3f, 0x7e, 0xf1, 0x52, 0x71, 0x35, 0x43, 0x6e, 0x97, 0x77, 0x07, 0xd8, 0x9e, 0xb9, 0x6c, + 0xbb, 0x4e, 0x77, 0x80, 0xf5, 0x76, 0x25, 0x5e, 0xd6, 0x39, 0x20, 0xe2, 0x94, 0xf1, 0x63, 0xbb, + 0x7c, 0x59, 0x9d, 0xbf, 0xd5, 0x40, 0x5d, 0xe7, 0x84, 0x55, 0x3f, 0x86, 0xaa, 0x51, 0x7f, 0x74, + 0x0d, 0xa6, 0xe3, 0x08, 0xfb, 0xc4, 0x2e, 0xac, 0x17, 0xb6, 0x4b, 0x8e, 0x1e, 0xa0, 0x05, 0x28, + 0x0e, 0xf0, 0x50, 0xbd, 0x8b, 0x92, 0x23, 0x1f, 0x91, 0x0d, 0xe5, 0x37, 0x98, 0xf6, 0xbd, 0x40, + 0xa8, 0x52, 0x97, 0x9c, 0x74, 0x88, 0x56, 0xa1, 0x12, 0x62, 0x9f, 0x44, 0xf4, 0x07, 0xa2, 0x8a, + 0x68, 0x39, 0xd9, 0xb8, 0xfe, 0x00, 0x2a, 0xe9, 0xeb, 0x92, 0x0a, 0x5e, 0xcc, 0x39, 0x09, 0x44, + 0x12, 0x2b, 0x1d, 0xca, 0x1c, 0xfa, 0x74, 0x40, 0x45, 0x12, 0x4f, 0x0f, 0xea, 0x3f, 0x17, 0xa0, + 0x9c, 0xbc, 0x34, 0xf4, 0x85, 0x99, 0xe5, 0x67, 0xcb, 0xb5, 0xd7, 0x3a, 0x3c, 0x94, 0xc8, 0x74, + 0x27, 0x4d, 0x00, 0xd1, 0xe3, 0x4c, 0x88, 0x3e, 0x0d, 0xfc, 0xcb, 0xcd, 0x75, 0xa0, 0xb1, 0xc4, + 0x31, 0x58, 0xf5, 0xb7, 0x50, 0x49, 0x65, 0x65, 0xae, 0x82, 0x09, 0xdc, 0x4f, 0xeb, 0xa5, 0x06, + 0x68, 0x19, 0x66, 0x8e, 0x09, 0x0f, 0x48, 0x3f, 0xd9, 0x42, 0x32, 0x42, 0x08, 0x4a, 0x71, 0x44, + 0x78, 0x52, 0x32, 0xf5, 0x8c, 0x36, 0xa0, 0x1c, 0x12, 0xee, 0x4a, 0xd3, 0x96, 0xd6, 0x8b, 0xdb, + 0xa5, 0x26, 0x8c, 0xce, 0x6b, 0x33, 0x2d, 0xc2, 0xa5, 0x29, 0x67, 0x42, 0xc2, 0xf7, 0xc2, 0xb8, + 0x3e, 0x84, 0x4a, 0x9a, 0x8a, 0x2c, 0x5c, 0x48, 0x38, 0x65, 0xdd, 0x28, 0x2d, 0x5c, 0x32, 0x44, + 0x3b, 0xb0, 0x98, 0xa4, 0x49, 0xba, 0x6e, 0x8a, 0xd1, 0x19, 0x2c, 0x64, 0x0b, 0xad, 0x04, 0xbc, + 0x05, 0xf3, 0x39, 0x58, 0xd0, 0x01, 0x49, 0xb2, 0x9a, 0xcb, 0x66, 0x0f, 0xe8, 0x80, 0xd4, 0xff, + 0xac, 0x02, 0xe4, 0x56, 0x97, 0xfb, 0xf5, 0xb0, 0xd7, 0xcb, 0xfc, 0xa1, 0x06, 0x68, 0x05, 0x8a, + 0x3c, 0x4a, 0x42, 0xe9, 0x13, 0xe5, 0xb4, 0xdb, 0x8e, 0x9c, 0x43, 0xff, 0x82, 0x0a, 0x8f, 0x22, + 0x57, 0x1e, 0x6b, 0x1d, 0xa0, 0x59, 0x1d, 0x9d, 0xd7, 0xca, 0x4e, 0xbb, 0x2d, 0x6d, 0xe7, 0x94, + 0x79, 0x14, 0xc9, 0x07, 0x54, 0x83, 0xea, 0x00, 0x87, 0x21, 0xe9, 0xba, 0x6f, 0x68, 0x5f, 0x3b, + 0xa7, 0xe4, 0x80, 0x9e, 0x7a, 0x46, 0xfb, 0xaa, 0xd2, 0x5d, 0xca, 0xc5, 0x99, 0x3a, 0x5c, 0x25, + 0x47, 0x0f, 0xd0, 0x2d, 0xb0, 0x4e, 0x39, 0x15, 0xa4, 0x83, 0xbd, 0x63, 0x75, 0x78, 0x4a, 0x4e, + 0x3e, 0x81, 0x6c, 0xa8, 0x84, 0xbe, 0x1b, 0xfa, 0x2e, 0x0d, 0xec, 0xb2, 0x7e, 0x13, 0xa1, 0xdf, + 0xf2, 0x5f, 0x04, 0x68, 0x15, 0x2c, 0xbd, 0xc2, 0x62, 0x61, 0x57, 0x92, 0x32, 0xfa, 0x2d, 0xff, + 0x65, 0x2c, 0xd0, 0x8a, 0x62, 0xbd, 0xc1, 0x71, 0x5f, 0xd8, 0x56, 0xba, 0xf4, 0x4c, 0x0e, 0xd1, + 0x3a, 0xcc, 0x86, 0xbe, 0x3b, 0xc0, 0x47, 0xc9, 0x32, 0xe8, 0x34, 0x43, 0x7f, 0x1f, 0x1f, 0x69, + 0xc4, 0x06, 
0xcc, 0xd1, 0x00, 0x7b, 0x82, 0x9e, 0x10, 0x17, 0x07, 0x2c, 0xb0, 0xab, 0x0a, 0x32, + 0x9b, 0x4e, 0x3e, 0x0e, 0x58, 0x20, 0x37, 0x6b, 0x42, 0x66, 0xb5, 0x8a, 0x01, 0x30, 0x55, 0x54, + 0x3d, 0xe6, 0xc6, 0x55, 0x54, 0x45, 0x72, 0x15, 0x05, 0x99, 0x37, 0x55, 0x14, 0x60, 0x1d, 0xaa, + 0x71, 0x40, 0x4e, 0xa8, 0x27, 0x70, 0xa7, 0x4f, 0xec, 0xab, 0x0a, 0x60, 0x4e, 0xa1, 0x07, 0xb0, + 0xd2, 0xa3, 0x84, 0x63, 0xee, 0xf5, 0xa8, 0x87, 0xfb, 0xae, 0xfe, 0x90, 0xb9, 0xfa, 0xf8, 0x2d, + 0x28, 0xfc, 0x0d, 0x13, 0xa0, 0x9d, 0xf0, 0x8d, 0x5c, 0x46, 0xf7, 0x60, 0x6c, 0xc9, 0x8d, 0x4e, + 0x71, 0x98, 0x30, 0x17, 0x15, 0xf3, 0xba, 0xb9, 0xdc, 0x3e, 0xc5, 0xa1, 0xe6, 0xd5, 0xa0, 0xaa, + 0x4e, 0x89, 0xab, 0x8d, 0x84, 0x74, 0xda, 0x6a, 0x6a, 0x4f, 0xb9, 0xe9, 0x3f, 0x60, 0x69, 0x80, + 0xf4, 0xd4, 0x92, 0xf2, 0xcc, 0xec, 0xe8, 0xbc, 0x56, 0x39, 0x90, 0x93, 0xd2, 0x58, 0x15, 0xb5, + 0xec, 0x44, 0x11, 0xba, 0x07, 0xf3, 0x19, 0x54, 0x7b, 0xec, 0x9a, 0xc2, 0x2f, 0x8c, 0xce, 0x6b, + 0xb3, 0x29, 0x5e, 0x19, 0x6d, 0x36, 0xe5, 0x28, 0xb7, 0xfd, 0x17, 0x16, 0x35, 0xcf, 0xf4, 0xdc, + 0x75, 0x95, 0xc9, 0x55, 0xb5, 0xb0, 0x9f, 0x1b, 0x2f, 0xcb, 0x57, 0xdb, 0x6f, 0xd9, 0xc8, 0xf7, + 0x89, 0xf2, 0xe0, 0xbf, 0x41, 0x73, 0xdc, 0xdc, 0x89, 0x37, 0x14, 0x48, 0xe7, 0xf6, 0x3a, 0xb3, + 0xe3, 0x46, 0x9a, 0x6d, 0x66, 0x4a, 0x5b, 0xbf, 0x12, 0x35, 0xdb, 0xd2, 0xce, 0xdc, 0x4a, 0xd5, + 0x72, 0x7f, 0xae, 0xe8, 0x97, 0x9f, 0xa1, 0xa4, 0x49, 0x37, 0x0d, 0x2d, 0xed, 0xc5, 0xd5, 0x31, + 0x94, 0x76, 0xe3, 0x0e, 0xa0, 0x0c, 0x95, 0xbb, 0xf6, 0xa6, 0xb1, 0xd1, 0x56, 0x6e, 0xdd, 0x06, + 0x2c, 0x69, 0xf0, 0xb8, 0x81, 0x6f, 0x29, 0xb4, 0xae, 0xd7, 0x0b, 0xd3, 0xc5, 0x59, 0x11, 0x4d, + 0xf4, 0x6d, 0x43, 0xfb, 0x71, 0x8e, 0xfd, 0x58, 0x5b, 0x95, 0x7c, 0xed, 0x13, 0xda, 0xaa, 0xe8, + 0x1f, 0x6a, 0x2b, 0x74, 0xed, 0x23, 0x6d, 0x85, 0xdd, 0x49, 0xb1, 0xa6, 0xd9, 0xd7, 0x93, 0xcf, + 0x9e, 0x5c, 0x38, 0x34, 0x1c, 0xff, 0x65, 0x7a, 0x75, 0xdc, 0x51, 0xdf, 0xfe, 0xad, 0xcb, 0x2e, + 0xf8, 0xa7, 0x81, 0xe0, 0x67, 0xe9, 0xed, 0x71, 0x1f, 0x4a, 0xd2, 0xe5, 0x76, 0x7d, 0x12, 0xae, + 0xa2, 0xa0, 0x47, 0xd9, 0x95, 0xb0, 0x31, 0x09, 0x39, 0xbd, 0x39, 0xda, 0x00, 0xfa, 0xc9, 0x15, + 0x5e, 0x68, 0x6f, 0x4e, 0x20, 0xd1, 0x9c, 0x1b, 0x9d, 0xd7, 0xac, 0xaf, 0x15, 0xf9, 0x60, 0xaf, + 0xe5, 0x58, 0x5a, 0xe7, 0xc0, 0x0b, 0xeb, 0x04, 0xaa, 0x06, 0x30, 0xbf, 0x77, 0x0b, 0xc6, 0xbd, + 0x9b, 0x77, 0x04, 0x53, 0x9f, 0xe8, 0x08, 0x8a, 0x9f, 0xec, 0x08, 0x4a, 0x63, 0x1d, 0x41, 0xfd, + 0xf7, 0x69, 0xb0, 0xb2, 0x86, 0x07, 0x61, 0x58, 0xa5, 0xcc, 0x8d, 0x08, 0x3f, 0xa1, 0x1e, 0x71, + 0x3b, 0x67, 0x82, 0x44, 0x2e, 0x27, 0x5e, 0xcc, 0x23, 0x7a, 0x42, 0x92, 0x66, 0x71, 0xf3, 0x92, + 0xce, 0x49, 0xd7, 0xe6, 0x06, 0x65, 0x6d, 0x2d, 0xd3, 0x94, 0x2a, 0x4e, 0x2a, 0x82, 0xbe, 0x83, + 0xeb, 0x79, 0x88, 0xae, 0xa1, 0x3e, 0x35, 0x81, 0xfa, 0x52, 0xa6, 0xde, 0xcd, 0x95, 0x0f, 0x60, + 0x89, 0x32, 0xf7, 0x6d, 0x4c, 0xe2, 0x31, 0xdd, 0xe2, 0x04, 0xba, 0x8b, 0x94, 0xbd, 0x52, 0xfc, + 0x5c, 0xd5, 0x85, 0x15, 0xa3, 0x24, 0xf2, 0x2e, 0x36, 0xb4, 0x4b, 0x13, 0x68, 0x2f, 0x67, 0x39, + 0xcb, 0xbb, 0x3b, 0x0f, 0xf0, 0x3d, 0x2c, 0x53, 0xe6, 0x9e, 0x62, 0x2a, 0x3e, 0x54, 0x9f, 0x9e, + 0xac, 0x22, 0xaf, 0x31, 0x15, 0xe3, 0xd2, 0xba, 0x22, 0x03, 0xc2, 0xfd, 0xb1, 0x8a, 0xcc, 0x4c, + 0x56, 0x91, 0x7d, 0xc5, 0xcf, 0x55, 0x5b, 0xb0, 0x48, 0xd9, 0x87, 0xb9, 0x96, 0x27, 0xd0, 0xbc, + 0x4a, 0xd9, 0x78, 0x9e, 0xaf, 0x60, 0x31, 0x22, 0x9e, 0x60, 0xdc, 0x74, 0x5b, 0x65, 0x02, 0xc5, + 0x85, 0x84, 0x9e, 0x49, 0xd6, 0x4f, 0x00, 0xf2, 0x75, 0x34, 0x0f, 0x53, 0x2c, 0x54, 0x47, 0xc7, + 0x72, 0xa6, 0x58, 0x28, 0x7b, 0xc0, 
0xae, 0xfc, 0xec, 0xe8, 0x83, 0x63, 0x39, 0xc9, 0x48, 0x9e, + 0xa7, 0x01, 0x3e, 0x62, 0x69, 0x13, 0xa8, 0x07, 0x6a, 0x96, 0x06, 0x8c, 0x27, 0x67, 0x47, 0x0f, + 0xe4, 0xec, 0x09, 0xee, 0xc7, 0x24, 0xed, 0x79, 0xd4, 0xa0, 0xfe, 0x53, 0x01, 0x2a, 0xe9, 0x6f, + 0x00, 0x7a, 0x64, 0xb6, 0xd1, 0xc5, 0xcf, 0xff, 0x75, 0x48, 0x92, 0xde, 0x4c, 0xd6, 0x6b, 0xdf, + 0xcf, 0x7b, 0xed, 0x7f, 0x4c, 0x4e, 0x1a, 0x72, 0x02, 0x56, 0x36, 0x67, 0xec, 0xb6, 0x30, 0xb6, + 0xdb, 0x1a, 0x54, 0x7b, 0x1e, 0x76, 0x7b, 0x38, 0xe8, 0xf6, 0x89, 0xee, 0x10, 0xe7, 0x1c, 0xe8, + 0x79, 0xf8, 0xb9, 0x9e, 0x49, 0x01, 0xac, 0x73, 0x44, 0x3c, 0x11, 0xa9, 0xa2, 0x68, 0xc0, 0x4b, + 0x3d, 0x53, 0xff, 0x65, 0x0a, 0xaa, 0xc6, 0x9f, 0x8b, 0xec, 0xa1, 0x03, 0x3c, 0x48, 0xe3, 0xa8, + 0x67, 0xd9, 0xb1, 0xf1, 0xa1, 0xfe, 0x96, 0x24, 0x9f, 0xa9, 0x32, 0x1f, 0xaa, 0x8f, 0x02, 0xba, + 0x0d, 0xc0, 0x87, 0x6e, 0x88, 0xbd, 0x63, 0x92, 0xc8, 0x97, 0x1c, 0x8b, 0x0f, 0x5b, 0x7a, 0x02, + 0xdd, 0x04, 0x8b, 0x0f, 0x5d, 0xc2, 0x39, 0xe3, 0x51, 0x52, 0xfb, 0x0a, 0x1f, 0x3e, 0x55, 0xe3, + 0x84, 0xdb, 0xe5, 0x4c, 0xf6, 0x02, 0xc9, 0x3b, 0xb0, 0xf8, 0xf0, 0x89, 0x9e, 0x90, 0x51, 0x45, + 0x1a, 0x55, 0xb7, 0x9e, 0x65, 0x91, 0x47, 0x15, 0x79, 0x54, 0xdd, 0x7a, 0x5a, 0xc2, 0x8c, 0x2a, + 0xb2, 0xa8, 0xba, 0xfb, 0xac, 0x08, 0x23, 0xaa, 0xc8, 0xa3, 0x5a, 0x29, 0x37, 0x89, 0xda, 0xb4, + 0xdf, 0xbd, 0x5f, 0xbb, 0xf2, 0xc7, 0xfb, 0xb5, 0x2b, 0x3f, 0x8e, 0xd6, 0x0a, 0xef, 0x46, 0x6b, + 0x85, 0xdf, 0x46, 0x6b, 0x85, 0xbf, 0x46, 0x6b, 0x85, 0xce, 0x8c, 0xfa, 0x0d, 0xff, 0xff, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x19, 0x9d, 0xe2, 0xd3, 0xe5, 0x0f, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/cgroups/metrics.proto b/vendor/github.com/containerd/cgroups/metrics.proto index 642623fce..62b519806 100644 --- a/vendor/github.com/containerd/cgroups/metrics.proto +++ b/vendor/github.com/containerd/cgroups/metrics.proto @@ -11,6 +11,7 @@ message Metrics { MemoryStat memory = 4; BlkIOStat blkio = 5; RdmaStat rdma = 6; + repeated NetworkStat network = 7; } message HugetlbStat { @@ -121,3 +122,15 @@ message RdmaEntry { uint32 hca_handles = 2; uint32 hca_objects = 3; } + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go index f3129b1a3..8a97d04dd 100644 --- a/vendor/github.com/containerd/cgroups/utils.go +++ b/vendor/github.com/containerd/cgroups/utils.go @@ -168,7 +168,7 @@ func readTasksPids(path string, subsystem Name) ([]Task, error) { func hugePageSizes() ([]string, error) { var ( pageSizes []string - sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"} + sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"} ) files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") if err != nil { diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index 9b2ba3def..2323f26f6 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -218,7 +218,7 @@ This will be the best place to discuss design and implementation. For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. **Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. 
-[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ) +[Click here for an invite to docker community slack.](https://dockr.ly/slack) ### Security audit diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index 9ada87346..6c7920004 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -9,6 +9,7 @@ import ( types "github.com/containerd/containerd/api/types" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ApplyRequest struct { // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } @@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo func init() { proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") + proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") @@ -216,36 +219,40 @@ func init() { } var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, - 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, - 0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, - 0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, - 0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, - 0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, - 0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, - 0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, - 0x32, 0xd5, 
0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, - 0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, - 0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, - 0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, - 0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, - 0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, - 0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, - 0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, - 0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, - 0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, - 0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, - 0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, - 0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, - 0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, - 0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, - 0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, - 0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, - 0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, - 0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, + 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, + 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, + 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9, + 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, + 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, + 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae, + 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, + 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, + 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, + 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, + 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, + 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, + 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, + 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, + 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 
0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, + 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, + 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, + 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, + 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, + 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, + 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, + 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, + 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, + 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, + 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, + 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, + 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, + 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, + 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, + 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Payloads) > 0 { + for k, _ := range m.Payloads { + dAtA[i] = 0x1a + i++ + v := m.Payloads[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovDiff(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize + i = encodeVarintDiff(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintDiff(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDiff(dAtA, i, uint64(v.Size())) + n2, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size())) - n2, err := m.Applied.MarshalTo(dAtA[i:]) + n3, err := m.Applied.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size())) - n3, err := m.Diff.MarshalTo(dAtA[i:]) + n4, err := m.Diff.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) { n += 1 + l + sovDiff(uint64(l)) } } + if len(m.Payloads) > 0 { + for k, v := range m.Payloads { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDiff(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) + } + } if 
m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string { if this == nil { return "nil" } + keysForPayloads := make([]string, 0, len(this.Payloads)) + for k, _ := range this.Payloads { + keysForPayloads = append(keysForPayloads, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) + mapStringForPayloads := "map[string]*types1.Any{" + for _, k := range keysForPayloads { + mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) + } + mapStringForPayloads += "}" s := strings.Join([]string{`&ApplyRequest{`, `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`, + `Payloads:` + mapStringForPayloads + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDiff + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDiff + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payloads == nil { + m.Payloads = make(map[string]*types1.Any) + } + var mapkey string + var mapvalue *types1.Any + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDiff + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDiff + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDiff + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &types1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDiff(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDiff + } + if (iNdEx + skippy) > postIndex { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Payloads[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDiff(dAtA[iNdEx:]) diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index 66d7ecb19..ae2707a25 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package containerd.services.diff.v1; import weak "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -25,6 +26,8 @@ message ApplyRequest { containerd.types.Descriptor diff = 1; repeated containerd.types.Mount mounts = 2; + + map payloads = 3; } message ApplyResponse { diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 016ced4b7..a4e238685 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -10,6 +10,7 @@ import ( rpc "github.com/gogo/googleapis/google/rpc" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo +type ServerResponse struct { + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerResponse) Reset() { *m = ServerResponse{} } +func (*ServerResponse) ProtoMessage() {} +func (*ServerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1a14fda866f10715, []int{3} +} +func (m *ServerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerResponse.Merge(m, src) +} +func (m *ServerResponse) XXX_Size() int { + return m.Size() +} +func (m *ServerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") + proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") } func init() { @@ -203,38 +244,42 @@ func init() { } var fileDescriptor_1a14fda866f10715 = 
[]byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, - 0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, - 0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, - 0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, - 0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, - 0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, - 0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, - 0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5, - 0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, - 0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, - 0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, - 0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, - 0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, - 0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, - 0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93, - 0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, - 0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, - 0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, - 0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, - 0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, - 0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87, - 0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, - 0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, - 0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, - 0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, - 0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, - 0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, - 0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, - 0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, - 0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, + // 549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, + 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, + 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, + 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 
0x12, 0x71, 0x9b, 0x79, + 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, + 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, + 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, + 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, + 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, + 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, + 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, + 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, + 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, + 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, + 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, + 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5, + 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, + 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, + 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, + 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, + 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, + 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, + 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, + 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, + 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, + 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, + 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, + 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, + 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, + 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, + 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, + 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, + 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, + 0xdc, 0x89, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -254,6 +299,8 @@ type IntrospectionClient interface { // Clients can use this to detect features and capabilities when using // containerd. 
Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) } type introspectionClient struct { @@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o return out, nil } +func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IntrospectionServer is the server API for Introspection service. type IntrospectionServer interface { // Plugins returns a list of plugins in containerd. @@ -280,6 +336,8 @@ type IntrospectionServer interface { // Clients can use this to detect features and capabilities when using // containerd. Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *types1.Empty) (*ServerResponse, error) } func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { @@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(types1.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) + } + return interceptor(ctx, in, info, handler) +} + var _Introspection_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.introspection.v1.Introspection", HandlerType: (*IntrospectionServer)(nil), @@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{ MethodName: "Plugins", Handler: _Introspection_Plugins_Handler, }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", @@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ServerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UUID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) + i += copy(dAtA[i:], m.UUID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) { return n } +func (m *ServerResponse) Size() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + l = len(m.UUID) + if l > 0 { + n += 1 + l + sovIntrospection(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovIntrospection(x uint64) (n int) { for { n++ @@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string { }, "") return s } +func (this *ServerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerResponse{`, + `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringIntrospection(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *ServerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIntrospection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIntrospection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIntrospection(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipIntrospection(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 95e804b9b..79cee9a57 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -4,6 +4,7 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import "google/rpc/status.proto"; +import "google/protobuf/empty.proto"; import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; @@ -14,6 +15,8 @@ service Introspection { // Clients can use this to detect features and capabilities when using // containerd. rpc Plugins(PluginsRequest) returns (PluginsResponse); + // Server returns information about the containerd server + rpc Server(google.protobuf.Empty) returns (ServerResponse); } message Plugin { @@ -79,3 +82,7 @@ message PluginsRequest { message PluginsResponse { repeated Plugin plugins = 1 [(gogoproto.nullable) = false]; } + +message ServerResponse { + string uuid = 1 [(gogoproto.customname) = "UUID"]; +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 60c80e98a..2338de6b9 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { switch compression { case Uncompressed: diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index fd8d98bf3..e05ca719c 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error { utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") + return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path) } return nil diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 133bfcdbe..c7cf4f0bc 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,10 +18,13 @@ package cio import ( "context" + "errors" "fmt" "io" "net/url" "os" + "path/filepath" + "strings" "sync" "github.com/containerd/containerd/defaults" @@ -242,17 +245,24 @@ func LogURI(uri *url.URL) Creator { // BinaryIO forwards container STDOUT|STDERR directly to a logging binary func BinaryIO(binary string, args map[string]string) Creator { return func(_ string) (IO, error) { + binary = filepath.Clean(binary) + if !strings.HasPrefix(binary, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "binary", - Host: binary, + Path: binary, } + q := uri.Query() for k, v := range args { - uri.Query().Set(k, v) + q.Set(k, v) } + uri.RawQuery = q.Encode() + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } @@ -262,14 +272,19 @@ func BinaryIO(binary string, args map[string]string) Creator { // If the log file already exists, the logs will be appended to the file. 
func LogFile(path string) Creator { return func(_ string) (IO, error) { + path = filepath.Clean(path) + if !strings.HasPrefix(path, "/") { + return nil, errors.New("absolute path needed") + } uri := &url.URL{ Scheme: "file", - Host: path, + Path: path, } + res := uri.String() return &logURI{ config: Config{ - Stdout: uri.String(), - Stderr: uri.String(), + Stdout: res, + Stderr: res, }, }, nil } diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 3c248eb3f..76abed6c7 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -43,6 +43,7 @@ import ( "github.com/containerd/containerd/content" contentproxy "github.com/containerd/containerd/content/proxy" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" @@ -56,6 +57,7 @@ import ( "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -86,13 +88,17 @@ func New(address string, opts ...ClientOpt) (*Client, error) { if copts.timeout == 0 { copts.timeout = 10 * time.Second } - rt := fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS) - if copts.defaultRuntime != "" { - rt = copts.defaultRuntime - } + c := &Client{ - runtime: rt, + defaultns: copts.defaultns, } + + if copts.defaultRuntime != "" { + c.runtime = copts.defaultRuntime + } else { + c.runtime = defaults.DefaultRuntime + } + if copts.services != nil { c.services = *copts.services } @@ -134,19 +140,15 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.conn, c.connector = conn, connector } if copts.services == nil && c.conn == nil { - return nil, errors.New("no grpc connection or services is available") + return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available") } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -163,20 +165,17 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { } } c := &Client{ - conn: conn, - runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), + defaultns: copts.defaultns, + conn: conn, + runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - namespaces := c.NamespaceService() - ctx := context.Background() - if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil { - if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok { - c.runtime = defaultRuntime - } - } else { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := 
c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err + } else if label != "" { + c.runtime = label } } @@ -193,13 +192,14 @@ type Client struct { connMu sync.Mutex conn *grpc.ClientConn runtime string + defaultns string connector func() (*grpc.ClientConn, error) } // Reconnect re-establishes the GRPC connection to the containerd daemon func (c *Client) Reconnect() error { if c.connector == nil { - return errors.New("unable to reconnect to containerd, no connector available") + return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available") } c.connMu.Lock() defer c.connMu.Unlock() @@ -222,7 +222,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return false, errors.New("no grpc connection available") + return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) @@ -339,7 +339,6 @@ func defaultRemoteContext() *RemoteContext { Resolver: docker.NewResolver(docker.ResolverOptions{ Client: http.DefaultClient, }), - Snapshotter: DefaultSnapshotter, } } @@ -354,7 +353,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag } if fetchCtx.Unpack { - return images.Image{}, errors.New("unpack on fetch not supported, try pull") + return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull") } if fetchCtx.PlatformMatcher == nil { @@ -407,6 +406,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, } } + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(ref, "@") { + ref = ref + "@" + desc.Digest.String() + } + pusher, err := pushCtx.Resolver.Pusher(ctx, ref) if err != nil { return err @@ -490,6 +494,27 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels)) } +// GetLabel gets a label value from namespace store +// If there is no default label, an empty string returned with nil error +func (c *Client) GetLabel(ctx context.Context, label string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + if c.defaultns == "" { + return "", err + } + ns = c.defaultns + } + + srv := c.NamespaceService() + labels, err := srv.Labels(ctx, ns) + if err != nil { + return "", err + } + + value := labels[label] + return value, nil +} + // Subscribe to events that match one or more of the provided filters. // // Callers should listen on both the envelope and errs channels. 
If the errs @@ -543,6 +568,10 @@ func (c *Client) ContentStore() content.Store { // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName) + if err != nil { + snapshotterName = DefaultSnapshotter + } if c.snapshotters != nil { return c.snapshotters[snapshotterName] } @@ -642,7 +671,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return Version{}, errors.New("no grpc connection available") + return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) @@ -655,6 +684,58 @@ func (c *Client) Version(ctx context.Context) (Version, error) { }, nil } +type ServerInfo struct { + UUID string +} + +func (c *Client) Server(ctx context.Context) (ServerInfo, error) { + c.connMu.Lock() + if c.conn == nil { + c.connMu.Unlock() + return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + } + c.connMu.Unlock() + + response, err := c.IntrospectionService().Server(ctx, &types.Empty{}) + if err != nil { + return ServerInfo{}, err + } + return ServerInfo{ + UUID: response.UUID, + }, nil +} + +func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) { + if name == "" { + label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel) + if err != nil { + return "", err + } + + if label != "" { + name = label + } else { + name = DefaultSnapshotter + } + } + + return name, nil +} + +func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) { + name, err := c.resolveSnapshotterName(ctx, name) + if err != nil { + return nil, err + } + + s := c.SnapshotService(name) + if s == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name) + } + + return s, nil +} + // CheckRuntime returns true if the current runtime matches the expected // runtime. 
Providing various parts of the runtime schema will match those // parts of the expected runtime diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go index f6321e51c..d4e3dfee5 100644 --- a/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go @@ -27,6 +27,7 @@ import ( "runtime" "time" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/services/server" @@ -152,7 +153,7 @@ func App() *cli.App { ttrpcAddress = fmt.Sprintf("%s.ttrpc", config.GRPC.Address) ) if address == "" { - return errors.New("grpc address cannot be empty") + return errors.Wrap(errdefs.ErrInvalidArgument, "grpc address cannot be empty") } log.G(ctx).WithFields(logrus.Fields{ "version": version.Version, diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go index da126cac5..2cd5a9122 100644 --- a/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go @@ -93,7 +93,7 @@ func setupDumpStacks() { }() } -func etwCallback(sourceID *guid.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) { +func etwCallback(sourceID guid.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) { if state == etw.ProviderStateCaptureState { dumpStacks(false) } diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go index 453d85ebf..fda424b18 100644 --- a/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go @@ -51,7 +51,7 @@ var publishCommand = cli.Command{ ctx := namespaces.WithNamespace(gocontext.Background(), context.String("namespace")) topic := context.String("topic") if topic == "" { - return errors.New("topic required to publish event") + return errors.Wrap(errdefs.ErrInvalidArgument, "topic required to publish event") } payload, err := getEventPayload(os.Stdin) if err != nil { diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go index 47722178c..7b417ba57 100644 --- a/vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go @@ -18,7 +18,6 @@ package command import ( "bytes" - "errors" "fmt" "io/ioutil" "log" @@ -28,7 +27,9 @@ import ( "time" "unsafe" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/services/server" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" "golang.org/x/sys/windows" @@ -44,7 +45,9 @@ var ( unregisterServiceFlag bool runServiceFlag bool - setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + setStdHandle = kernel32.NewProc("SetStdHandle") + allocConsole = kernel32.NewProc("AllocConsole") oldStderr 
windows.Handle panicFile *os.File @@ -162,7 +165,7 @@ func (h *etwHook) Fire(e *logrus.Entry) error { etype = windows.EVENTLOG_INFORMATION_TYPE eid = eventDebug default: - return errors.New("unknown level") + return errors.Wrap(errdefs.ErrInvalidArgument, "unknown level") } // If there is additional data, include it as a second string. @@ -311,7 +314,7 @@ func registerUnregisterService(root string) (bool, error) { if unregisterServiceFlag { if registerServiceFlag { - return true, errors.New("--register-service and --unregister-service cannot be used together") + return true, errors.Wrap(errdefs.ErrInvalidArgument, "--register-service and --unregister-service cannot be used together") } return true, unregisterService() } @@ -321,6 +324,23 @@ func registerUnregisterService(root string) (bool, error) { } if runServiceFlag { + // Allocate a conhost for containerd here. We don't actually use this + // at all in containerd, but it will be inherited by any processes + // containerd executes, so they won't need to allocate their own + // conhosts. This is important for two reasons: + // - Creating a conhost slows down process launch. + // - We have seen reliability issues when launching many processes. + // Sometimes the process invocation will fail due to an error when + // creating the conhost. + // + // This needs to be done before initializing the panic file, as + // AllocConsole sets the stdio handles to point to the new conhost, + // and we want to make sure stderr goes to the panic file. + r, _, err := allocConsole.Call() + if r == 0 && err != nil { + return true, fmt.Errorf("error allocating conhost: %s", err) + } + if err := initPanicFile(filepath.Join(root, "panic.log")); err != nil { return true, err } @@ -340,7 +360,6 @@ func registerUnregisterService(root string) (bool, error) { logrus.AddHook(&etwHook{log}) logrus.SetOutput(ioutil.Discard) - } return false, nil } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 9bc43604e..46d51ecd9 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -233,7 +233,11 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N } // get the rootfs from the snapshotter and add it to the request - mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey) + s, err := c.client.getSnapshotter(ctx, r.Snapshotter) + if err != nil { + return nil, err + } + mounts, err := s.Mounts(ctx, r.SnapshotKey) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index a65f429ae..e36b47e2e 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -20,9 +20,7 @@ import ( "context" "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" @@ -118,9 +116,17 @@ func WithSnapshotter(name string) NewContainerOpts { // WithSnapshot uses an existing root filesystem for the container func WithSnapshot(id string) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - setSnapshotterIfEmpty(ctx, client, 
c) // check that the snapshot exists, if not, fail on creation - if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil { + var err error + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Mounts(ctx, id); err != nil { return err } c.SnapshotKey = id @@ -136,9 +142,17 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.Prepare(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -153,7 +167,13 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta if c.Snapshotter == "" { return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot") } - return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey) + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) { + return err + } } return nil } @@ -166,9 +186,17 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer if err != nil { return err } - setSnapshotterIfEmpty(ctx, client, c) + parent := identity.ChainID(diffIDs).String() - if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil { + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + s, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } + if _, err := s.View(ctx, id, parent, opts...); err != nil { return err } c.SnapshotKey = id @@ -177,21 +205,6 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer } } -func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) { - if c.Snapshotter == "" { - defaultSnapshotter := DefaultSnapshotter - namespaceService := client.NamespaceService() - if ns, err := namespaces.NamespaceRequired(ctx); err == nil { - if labels, err := namespaceService.Labels(ctx, ns); err == nil { - if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok { - defaultSnapshotter = snapshotLabel - } - } - } - c.Snapshotter = defaultSnapshotter - } -} - // WithContainerExtension appends extension data to the container object. // Use this to decorate the container object with additional data for the client // integration. 
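
Editor's note (not part of the patch): the container_opts changes above remove the setSnapshotterIfEmpty helper and instead resolve the snapshotter name through the client (resolveSnapshotterName, which reads the namespace's containerd.io/defaults/snapshotter label via the new GetLabel method). The sketch below is illustrative only; the socket path, the "example" namespace, and the "native" snapshotter are assumptions for the example, not values taken from this patch.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Assumed socket path and namespace; adjust for your environment.
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Point this namespace at a non-default snapshotter via its label.
	// "native" is just an illustrative choice here.
	if err := client.NamespaceService().SetLabel(ctx, "example",
		defaults.DefaultSnapshotterNSLabel, "native"); err != nil {
		log.Fatal(err)
	}

	// GetLabel (added in this update) reads the label back. WithSnapshot,
	// WithNewSnapshot, WithNewSnapshotView and image unpacking now resolve
	// the snapshotter name through this same lookup instead of the removed
	// setSnapshotterIfEmpty helper.
	name, err := client.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("containers created without WithSnapshotter will use %q", name)
}

With the label set, container options that omit WithSnapshotter pick up the labelled snapshotter; when the label is absent, resolveSnapshotterName falls back to the platform's DefaultSnapshotter, matching the behaviour shown in the client.go hunk above.
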
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index 340a91857..af52d0422 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -50,13 +50,18 @@ func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool return err } - setSnapshotterIfEmpty(ctx, client, c) - var ( - snapshotter = client.SnapshotService(c.Snapshotter) - parent = identity.ChainID(diffIDs).String() - usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) + parent = identity.ChainID(diffIDs).String() + usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) ) + c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter) + if err != nil { + return err + } + snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter) + if err != nil { + return err + } if _, err := snapshotter.Stat(ctx, usernsID); err == nil { if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { c.SnapshotKey = id diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 6d5c76241..c1c204618 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -169,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { return err } +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + // seekReader attempts to seek the reader to the given offset, either by // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding // up to the given offset. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 30ed42235..319e8777b 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -32,4 +32,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 16f1048ca..5eede8de8 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -40,4 +40,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. 
DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" ) diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 182362941..445df0192 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -48,13 +48,14 @@ type diffRemote struct { func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { var config diff.ApplyConfig for _, opt := range opts { - if err := opt(&config); err != nil { + if err := opt(ctx, desc, &config); err != nil { return ocispec.Descriptor{}, err } } req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(desc), - Mounts: fromMounts(mounts), + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: config.ProcessorPayloads, } resp, err := r.client.Apply(ctx, req) if err != nil { diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go index e4251026c..7a6b65c3e 100644 --- a/vendor/github.com/containerd/containerd/diff/apply/apply.go +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -23,11 +23,8 @@ import ( "time" "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" digest "github.com/opencontainers/go-digest" @@ -66,54 +63,63 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ } }() - isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType) - if err != nil { - return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) + var config diff.ApplyConfig + for _, o := range opts { + if err := o(ctx, desc, &config); err != nil { + return emptyDesc, errors.Wrap(err, "failed to apply config opt") + } } - var ocidesc ocispec.Descriptor + ra, err := s.store.ReaderAt(ctx, desc) + if err != nil { + return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + var processors []diff.StreamProcessor + processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) + processors = append(processors, processor) + for { + if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { + return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) + } + processors = append(processors, processor) + if processor.MediaType() == ocispec.MediaTypeImageLayer { + break + } + } + defer processor.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(processor, digester.Hash()), + } if err := mount.WithTempMount(ctx, mounts, func(root string) error { - ra, err := s.store.ReaderAt(ctx, desc) - if err != nil { - return errors.Wrap(err, "failed to get reader from content store") - } - defer ra.Close() - - r := content.NewReader(ra) - if isCompressed { - ds, err := compression.DecompressStream(r) - if err != nil { - return err - } - defer ds.Close() - r = ds - } - - digester := digest.Canonical.Digester() - rc := &readCounter{ - r: io.TeeReader(r, digester.Hash()), - } - if _, err := 
archive.Apply(ctx, root, rc); err != nil { return err } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { - return err - } - - ocidesc = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageLayer, - Size: rc.c, - Digest: digester.Digest(), - } - return nil - + _, err := io.Copy(ioutil.Discard, rc) + return err }); err != nil { return emptyDesc, err } - return ocidesc, nil + + for _, p := range processors { + if ep, ok := p.(interface { + Err() error + }); ok { + if err := ep.Err(); err != nil { + return emptyDesc, err + } + } + } + return ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + }, nil } type readCounter struct { diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index a62a4671a..17aab616e 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -20,6 +20,7 @@ import ( "context" "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -53,10 +54,12 @@ type Comparer interface { // ApplyConfig is used to hold parameters needed for a apply operation type ApplyConfig struct { + // ProcessorPayloads specifies the payload sent to various processors + ProcessorPayloads map[string]*types.Any } // ApplyOpt is used to configure an Apply operation -type ApplyOpt func(*ApplyConfig) error +type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error // Applier allows applying diffs between mounts type Applier interface { @@ -94,3 +97,11 @@ func WithLabels(labels map[string]string) Opt { return nil } } + +// WithPayloads sets the apply processor payloads to the config +func WithPayloads(payloads map[string]*types.Any) ApplyOpt { + return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { + c.ProcessorPayloads = payloads + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go new file mode 100644 index 000000000..4b8f27f14 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package diff + +import ( + "context" + "io" + "os" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/images" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + handlers []Handler + + // ErrNoProcessor is returned when no stream processor is available for a media-type + ErrNoProcessor = errors.New("no processor for media-type") +) + +func init() { + // register the default compression handler + RegisterProcessor(compressedHandler) +} + +// RegisterProcessor registers a stream processor for media-types +func RegisterProcessor(handler Handler) { + handlers = append(handlers, handler) +} + +// GetProcessor returns the processor for a media-type +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + // reverse this list so that user configured handlers come up first + for i := len(handlers) - 1; i >= 0; i-- { + processor, ok := handlers[i](ctx, stream.MediaType()) + if ok { + return processor(ctx, stream, payloads) + } + } + return nil, ErrNoProcessor +} + +// Handler checks a media-type and initializes the processor +type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) + +// StaticHandler returns the processor init func for a static media-type +func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { + return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + if mediaType == expectedMediaType { + return fn, true + } + return nil, false + } +} + +// StreamProcessorInit returns the initialized stream processor +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) + +// RawProcessor provides access to direct fd for processing +type RawProcessor interface { + // File returns the fd for the read stream of the underlying processor + File() *os.File +} + +// StreamProcessor handles processing a content stream and transforming it into a different media-type +type StreamProcessor interface { + io.ReadCloser + + // MediaType is the resulting media-type that the processor processes the stream into + MediaType() string +} + +func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + compressed, err := images.IsCompressedDiff(ctx, mediaType) + if err != nil { + return nil, false + } + if compressed { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + ds, err := compression.DecompressStream(stream) + if err != nil { + return nil, err + } + + return &compressedProcessor{ + rc: ds, + }, nil + }, true + } + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return &stdProcessor{ + rc: stream, + }, nil + }, true +} + +// NewProcessorChain initialized the root StreamProcessor +func NewProcessorChain(mt string, r io.Reader) StreamProcessor { + return &processorChain{ + mt: mt, + rc: r, + } +} + +type processorChain struct { + mt string + rc io.Reader +} + +func (c *processorChain) MediaType() string { + return c.mt +} + +func (c *processorChain) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *processorChain) Close() error { + return nil +} + +type stdProcessor struct { + rc StreamProcessor +} + +func (c *stdProcessor) MediaType() string { + return 
ocispec.MediaTypeImageLayer +} + +func (c *stdProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *stdProcessor) Close() error { + return nil +} + +type compressedProcessor struct { + rc io.ReadCloser +} + +func (c *compressedProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *compressedProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *compressedProcessor) Close() error { + return c.rc.Close() +} + +func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler { + set := make(map[string]struct{}, len(mediaTypes)) + for _, m := range mediaTypes { + set[m] = struct{}{} + } + return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { + if _, ok := set[mediaType]; ok { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + payload := payloads[id] + return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload) + }, true + } + return nil, false + } +} + +const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE" diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go new file mode 100644 index 000000000..28f38d998 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -0,0 +1,146 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + var payloadC io.Closer + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + go func() { + io.Copy(w, bytes.NewReader(data)) + w.Close() + }() + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + payloadC = r + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + if payloadC != nil { + payloadC.Close() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go new file mode 100644 index 000000000..8dadd72c9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -0,0 +1,165 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + winio "github.com/Microsoft/go-winio" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const processorPipe = "STREAM_PROCESSOR_PIPE" + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + up, err := getUiqPath() + if err != nil { + return nil, err + } + path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up) + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + go func() { + defer l.Close() + conn, err := l.Accept() + if err != nil { + logrus.WithError(err).Error("accept npipe connection") + return + } + io.Copy(conn, bytes.NewReader(data)) + conn.Close() + }() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path)) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} + +func getUiqPath() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + os.Remove(dir) + return filepath.Base(dir), nil +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go index 1c82860d7..5ce35910c 100644 --- a/vendor/github.com/containerd/containerd/diff/walking/differ.go +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -150,7 +150,9 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if err != nil { return errors.Wrap(err, "failed to get info from content store") } - + if info.Labels == nil { + info.Labels = make(map[string]string) + } // Set uncompressed label if digest already existed without label if _, ok := info.Labels[uncompressed]; !ok { info.Labels[uncompressed] = config.Labels[uncompressed] diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go index 35a1712cb..c6fcf7910 100644 --- a/vendor/github.com/containerd/containerd/gc/gc.go +++ b/vendor/github.com/containerd/containerd/gc/gc.go @@ -30,6 +30,11 @@ import ( // ResourceType represents type of resource at a node type ResourceType uint8 +// ResourceMax represents the max resource. +// Upper bits are stripped out during the mark phase, allowing the upper 3 bits +// to be used by the caller reference function. 
+const ResourceMax = ResourceType(0x1F) + // Node presents a resource which has a type and key, // this node can be used to lookup other nodes. type Node struct { @@ -80,6 +85,8 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc } } + // strip bits above max resource type + id.Type = id.Type & ResourceMax // mark as black when done reachable[id] = struct{}{} } diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 14bfea91b..9cfc03a30 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -21,11 +21,13 @@ import ( "fmt" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" - digest "github.com/opencontainers/go-digest" + "github.com/containerd/containerd/snapshots" + "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -40,7 +42,7 @@ type Image interface { // Labels of the image Labels() map[string]string // Unpack unpacks the image's content into a snapshot - Unpack(context.Context, string) error + Unpack(context.Context, string, ...UnpackOpt) error // RootFS returns the unpacked diffids that make up images rootfs. RootFS(ctx context.Context) ([]digest.Digest, error) // Size returns the total size of the image's packed resources. @@ -108,7 +110,10 @@ func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { } func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) { - sn := i.client.SnapshotService(snapshotterName) + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return false, err + } cs := i.client.ContentStore() diffs, err := i.i.RootFS(ctx, cs, i.platform) @@ -127,28 +132,53 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } -func (i *image) Unpack(ctx context.Context, snapshotterName string) error { +// UnpackConfig provides configuration for the unpack of an image +type UnpackConfig struct { + // ApplyOpts for applying a diff to a snapshotter + ApplyOpts []diff.ApplyOpt + // SnapshotOpts for configuring a snapshotter + SnapshotOpts []snapshots.Opt +} + +// UnpackOpt provides configuration for unpack +type UnpackOpt func(context.Context, *UnpackConfig) error + +func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { return err } defer done(ctx) + var config UnpackConfig + for _, o := range opts { + if err := o(ctx, &config); err != nil { + return err + } + } + layers, err := i.getLayers(ctx, i.platform) if err != nil { return err } var ( - sn = i.client.SnapshotService(snapshotterName) a = i.client.DiffService() cs = i.client.ContentStore() chain []digest.Digest unpacked bool ) + snapshotterName, err = i.client.resolveSnapshotterName(ctx, snapshotterName) + if err != nil { + return err + } + sn, err := i.client.getSnapshotter(ctx, snapshotterName) + if err != nil { + return err + } for _, layer := range layers { - unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a) + unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts) if err 
!= nil { return err } diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 5440c0d1b..244ef3224 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -89,31 +89,29 @@ func WithImage(is images.Store, name string) ExportOpt { } // WithManifest adds a manifest to the exported archive. -// It is up to caller to put name annotation to on the manifest -// descriptor if needed. -func WithManifest(manifest ocispec.Descriptor) ExportOpt { +// When names are given they will be set on the manifest in the +// exported archive, creating an index record for each name. +// When no names are provided, it is up to caller to put name annotation to +// on the manifest descriptor if needed. +func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { return func(ctx context.Context, o *exportOptions) error { - o.manifests = append(o.manifests, manifest) - return nil - } -} - -// WithNamedManifest adds a manifest to the exported archive -// with the provided names. -func WithNamedManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { - return func(ctx context.Context, o *exportOptions) error { - for _, name := range names { - manifest.Annotations = addNameAnnotation(name, manifest.Annotations) + if len(names) == 0 { o.manifests = append(o.manifests, manifest) } + for _, name := range names { + mc := manifest + mc.Annotations = addNameAnnotation(name, manifest.Annotations) + o.manifests = append(o.manifests, mc) + } return nil } } -func addNameAnnotation(name string, annotations map[string]string) map[string]string { - if annotations == nil { - annotations = map[string]string{} +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v } annotations[images.AnnotationImageName] = name diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index 0e6415911..539900685 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -22,12 +22,14 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "path" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" digest "github.com/opencontainers/go-digest" @@ -36,6 +38,22 @@ import ( "github.com/pkg/errors" ) +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + // ImportIndex imports an index from a tar archive image bundle // - implements Docker v1.1, v1.2 and OCI v1. // - prefers OCI v1 when provided @@ -43,8 +61,7 @@ import ( // - normalizes Docker references and adds as OCI ref name // e.g. 
alpine:latest -> docker.io/library/alpine:latest // - existing OCI reference names are untouched -// - TODO: support option to compress layers on ingest -func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) { +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } symlinks = make(map[string]string) blobs = make(map[string]ocispec.Descriptor) + iopts importOpts ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + for { hdr, err := tr.Next() if err == io.EOF { @@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc if !ok { return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) } - config.MediaType = ocispec.MediaTypeImageConfig + config.MediaType = images.MediaTypeDockerSchema2Config - layers, err := resolveLayers(ctx, store, mfst.Layers, blobs) + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") } - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, } desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest) @@ -211,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size return dgstr.Digest(), nil } -func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var layers []ocispec.Descriptor - for _, f := range layerFiles { +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + filters := []string{} + for i, f := range layerFiles { desc, ok := blobs[f] if !ok { return nil, errors.Errorf("layer %q not found", f) } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + } + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels["containerd.io/uncompressed"] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + desc.Digest = info.Digest + desc.Size = info.Size + } + } + return nil + }, filters...) 
+ if err != nil { + return nil, errors.Wrap(err, "failure checking for compressed blobs") + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } // Open blob, resolve media type ra, err := store.ReaderAt(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest) + return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) } s, err := compression.DecompressStream(content.NewReader(ra)) if err != nil { - return nil, errors.Wrapf(err, "failed to detect compression for %q", f) + return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) } if s.GetCompression() == compression.Uncompressed { - // TODO: Support compressing and writing back to content store - desc.MediaType = ocispec.MediaTypeImageLayer + if compress { + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + "containerd.io/uncompressed": desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } } else { - desc.MediaType = ocispec.MediaTypeImageLayerGzip + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip } s.Close() - layers = append(layers, desc) } return layers, nil } +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + } + + cw, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + } + } + + return desc, nil +} + func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { manifestBytes, err := json.Marshal(manifest) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index 19f419eaa..cd63517e5 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -91,7 +91,7 @@ func familiarizeReference(ref string) (string, error) { func ociReferenceName(name string) string { // OCI defines the reference name as only a tag excluding the // repository. 
The containerd annotation contains the full image name - // since the tag is insufficent for correctly naming and referring to an + // since the tag is insufficient for correctly naming and referring to an // image var ociRef string if spec, err := reference.Parse(name); err == nil { diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index f72684d82..81c92ee4f 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -142,6 +142,7 @@ type platformManifest struct { // this direction because this abstraction is not needed.` func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( + limit = 1 m []platformManifest wasIndex bool ) @@ -210,10 +211,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } } + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + wasIndex = true + if len(descs) > limit { + return descs[:limit], nil + } return descs, nil - } return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) }), image); err != nil { @@ -227,17 +240,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } return ocispec.Manifest{}, err } - - sort.SliceStable(m, func(i, j int) bool { - if m[i].p == nil { - return false - } - if m[j].p == nil { - return true - } - return platform.Less(*m[i].p, *m[j].p) - }) - return *m[0].m, nil } @@ -357,6 +359,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr descs = append(descs, index.Manifests...) 
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerEnc, MediaTypeDockerSchema2LayerGzipEnc, MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, @@ -406,3 +409,53 @@ func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) { } return false, nil } + +// GetImageLayerDescriptors gets the image layer Descriptors of an image; the array contains +// a list of Descriptors belonging to one platform followed by lists of other platforms +func GetImageLayerDescriptors(ctx context.Context, cs content.Store, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var lis []ocispec.Descriptor + + ds := platforms.DefaultSpec() + platform := &ds + + switch desc.MediaType { + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex, + MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + children, err := Children(ctx, cs, desc) + if err != nil { + if errdefs.IsNotFound(err) { + return []ocispec.Descriptor{}, nil + } + return []ocispec.Descriptor{}, err + } + + if desc.Platform != nil { + platform = desc.Platform + } + + for _, child := range children { + var tmp []ocispec.Descriptor + + switch child.MediaType { + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerGzipEnc, MediaTypeDockerSchema2LayerEnc: + tdesc := child + tdesc.Platform = platform + tmp = append(tmp, tdesc) + default: + tmp, err = GetImageLayerDescriptors(ctx, cs, child) + } + + if err != nil { + return []ocispec.Descriptor{}, err + } + + lis = append(lis, tmp...) + } + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + default: + return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "GetImageLayerInfo: unhandled media type %s", desc.MediaType) + } + return lis, nil +} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 186a3b673..5fee7467a 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -22,8 +22,10 @@ package images // here for clarity. 
const ( MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerEnc = "application/vnd.docker.image.rootfs.diff.tar+enc" MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerGzipEnc = "application/vnd.docker.image.rootfs.diff.tar.gzip+enc" MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 0e1531ab4..8dc61218c 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -35,6 +35,7 @@ type importOpts struct { imageRefT func(string) string dgstRefT func(digest.Digest) string allPlatforms bool + compress bool } // ImportOpt allows the caller to specify import specific options @@ -74,6 +75,15 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt { } } +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(c *importOpts) error { + c.compress = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. // Note that unreferrenced blobs may be imported to the content store as well. @@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } defer done(ctx) - index, err := archive.ImportIndex(ctx, c.ContentStore(), reader) + var aio []archive.ImportOpt + if iopts.compress { + aio = append(aio, archive.WithImportCompression()) + } + + index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...) 
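As a usage sketch only (the socket path, namespace, and archive path below are assumptions, not taken from this patch), importing a Docker-format archive while compressing its uncompressed layers could look like this:

    package main

    import (
        "context"
        "log"
        "os"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/namespaces"
    )

    func main() {
        client, err := containerd.New("/run/containerd/containerd.sock")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        f, err := os.Open("/tmp/alpine-docker-save.tar") // hypothetical docker-save archive
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        ctx := namespaces.WithNamespace(context.Background(), "default")
        // Compress uncompressed layers while ingesting, so the stored blobs match
        // the gzip media types written into the synthesized manifest.
        imgs, err := client.Import(ctx, f, containerd.WithImportCompression())
        if err != nil {
            log.Fatal(err)
        }
        for _, img := range imgs {
            log.Println("imported", img.Name)
        }
    }
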
if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index a60f51ae9..4a07a256b 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -637,11 +637,11 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) } size = status.Offset - actual = nw.w.Digest() if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } + actual = nw.w.Digest() } bkt, err := createBlobBucket(tx, nw.namespace, actual) diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index 6afaa1772..afe16c922 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -46,11 +46,17 @@ const ( ResourceIngest ) +const ( + resourceContentFlat = ResourceContent | 0x20 + resourceSnapshotFlat = ResourceSnapshot | 0x20 +) + var ( labelGCRoot = []byte("containerd.io/gc.root") labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") labelGCContentRef = []byte("containerd.io/gc.ref.content") labelGCExpire = []byte("containerd.io/gc.expire") + labelGCFlat = []byte("containerd.io/gc.flat") ) func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { @@ -90,6 +96,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } libkt := lbkt.Bucket(k) + var flat bool if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil { if expV := lblbkt.Get(labelGCExpire); expV != nil { @@ -102,6 +109,10 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return nil } } + + if flatV := lblbkt.Get(labelGCFlat); flatV != nil { + flat = true + } } fn(gcnode(ResourceLease, ns, string(k))) @@ -111,16 +122,26 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { // no need to allow the lookup to be recursive, handling here // therefore reduces the number of database seeks. 
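The flat label is read from a lease's label bucket, so a client that wants flat references would set it at lease creation time. A sketch follows; the leases package's WithRandomID and WithLabels options and the socket path are assumptions of this example, not part of the change:

    package main

    import (
        "context"
        "log"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/leases"
        "github.com/containerd/containerd/namespaces"
    )

    func main() {
        client, err := containerd.New("/run/containerd/containerd.sock")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        ctx := namespaces.WithNamespace(context.Background(), "default")

        // The gc.flat label makes the scanner emit flat content/snapshot nodes,
        // so label-derived references are not followed for this lease.
        ls := client.LeasesService()
        lease, err := ls.Create(ctx,
            leases.WithRandomID(),
            leases.WithLabels(map[string]string{"containerd.io/gc.flat": "true"}),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer ls.Delete(ctx, lease)
    }
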
+ ctype := ResourceContent + if flat { + ctype = resourceContentFlat + } + cbkt := libkt.Bucket(bucketKeyObjectContent) if cbkt != nil { if err := cbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceContent, ns, string(k))) + fn(gcnode(ctype, ns, string(k))) return nil }); err != nil { return err } } + stype := ResourceSnapshot + if flat { + stype = resourceSnapshotFlat + } + sbkt := libkt.Bucket(bucketKeyObjectSnapshots) if sbkt != nil { if err := sbkt.ForEach(func(sk, sv []byte) error { @@ -130,7 +151,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { snbkt := sbkt.Bucket(sk) return snbkt.ForEach(func(k, v []byte) error { - fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k))) + fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k))) return nil }) }); err != nil { @@ -257,7 +278,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { } func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { - if node.Type == ResourceContent { + switch node.Type { + case ResourceContent: bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) if bkt == nil { // Node may be created from dead edge @@ -265,7 +287,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceSnapshot { + case ResourceSnapshot, resourceSnapshotFlat: parts := strings.SplitN(node.Key, "/", 2) if len(parts) != 2 { return errors.Errorf("invalid snapshot gc key %s", node.Key) @@ -280,11 +302,16 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } if pv := bkt.Get(bucketKeyParent); len(pv) > 0 { - fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + } + + // Do not send labeled references for flat snapshot refs + if node.Type == resourceSnapshotFlat { + return nil } return sendLabelRefs(node.Namespace, bkt, fn) - } else if node.Type == ResourceIngest { + case ResourceIngest: // Send expected value bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) if bkt == nil { diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go index 74951eb5c..25d0e1578 100644 --- a/vendor/github.com/containerd/containerd/metadata/namespaces.go +++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go @@ -129,7 +129,15 @@ func (s *namespaceStore) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (s *namespaceStore) Delete(ctx context.Context, namespace string) error { +func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := &namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, i); err != nil { + return err + } + } bkt := getBucket(s.tx, bucketKeyVersion) if empty, err := s.namespaceEmpty(ctx, namespace); err != nil { return err diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 542091719..23976636f 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -34,6 +34,10 @@ import ( bolt 
"go.etcd.io/bbolt" ) +const ( + inheritedLabelsPrefix = "containerd.io/snapshot/" +) + type snapshotter struct { snapshots.Snapshotter name string @@ -209,6 +213,15 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath bkey = string(sbkt.Get(bucketKeyName)) local.Parent = string(sbkt.Get(bucketKeyParent)) + inner := snapshots.Info{ + Name: bkey, + Labels: filterInheritedLabels(local.Labels), + } + + if _, err := s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil { + return err + } + return nil }); err != nil { return snapshots.Info{}, err @@ -338,12 +351,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // TODO: Consider doing this outside of transaction to lessen // metadata lock time if readonly { - m, err = s.Snapshotter.View(ctx, bkey, bparent) + m, err = s.Snapshotter.View(ctx, bkey, bparent, inheritedOpt) } else { - m, err = s.Snapshotter.Prepare(ctx, bkey, bparent) + m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, inheritedOpt) } return err }); err != nil { @@ -445,9 +460,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } + inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels)) + // TODO: Consider doing this outside of transaction to lessen // metadata lock time - return s.Snapshotter.Commit(ctx, nameKey, bkey) + return s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt) }) } @@ -761,3 +778,19 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { func (s *snapshotter) Close() error { return s.Snapshotter.Close() } + +// filterInheritedLabels filters the provided labels by removing any key which doesn't have +// a prefix of "containerd.io/snapshot/". +func filterInheritedLabels(labels map[string]string) map[string]string { + if labels == nil { + return nil + } + + filtered := make(map[string]string) + for k, v := range labels { + if strings.HasPrefix(k, inheritedLabelsPrefix) { + filtered[k] = v + } + } + return filtered +} diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index eea70ca33..4c66406b0 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -100,10 +100,18 @@ func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error { - var req api.DeleteNamespaceRequest - - req.Name = namespace +func (r *remoteNamespaces) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, &i); err != nil { + return err + } + } + req := api.DeleteNamespaceRequest{ + Name: namespace, + } _, err := r.client.Delete(ctx, &req) if err != nil { return errdefs.FromGRPC(err) diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 0b5c98569..5936772cb 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -33,5 +33,14 @@ type Store interface { List(ctx context.Context) ([]string, error) // Delete removes the namespace. The namespace must be empty to be deleted. 
- Delete(ctx context.Context, namespace string) error + Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error } + +// DeleteInfo specifies information for the deletion of a namespace +type DeleteInfo struct { + // Name of the namespace + Name string +} + +// DeleteOpts allows the caller to set options for namespace deletion +type DeleteOpts func(context.Context, *DeleteInfo) error diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go index 28e039417..2224334d1 100644 --- a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -30,7 +30,7 @@ const ( func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { md, ok := ttrpc.GetMetadata(ctx) if !ok { - md = ttrpc.Metadata{} + md = ttrpc.MD{} } md.Set(TTRPCHeader, namespace) return ttrpc.WithMetadata(ctx, md) diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go b/vendor/github.com/containerd/containerd/namespaces_opts_linux.go similarity index 54% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go rename to vendor/github.com/containerd/containerd/namespaces_opts_linux.go index 53252ec60..6b8cc8f85 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/process.go +++ b/vendor/github.com/containerd/containerd/namespaces_opts_linux.go @@ -1,5 +1,3 @@ -// +build !windows - /* Copyright The containerd Authors. @@ -16,27 +14,23 @@ limitations under the License. */ -package proc +package containerd import ( - "github.com/pkg/errors" + "context" + + "github.com/containerd/cgroups" + "github.com/containerd/containerd/namespaces" ) -// RuncRoot is the path to the root runc state directory -const RuncRoot = "/run/containerd/runc" - -func stateName(v interface{}) string { - switch v.(type) { - case *runningState, *execRunningState: - return "running" - case *createdState, *execCreatedState, *createdCheckpointState: - return "created" - case *pausedState: - return "paused" - case *deletedState: - return "deleted" - case *stoppedState: - return "stopped" +// WithNamespaceCgroupDeletion removes the cgroup directory that was created for the namespace +func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) error { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(i.Name)) + if err != nil { + if err == cgroups.ErrCgroupDeleted { + return nil + } + return err } - panic(errors.Errorf("invalid state %v", v)) + return cg.Delete() } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index ce756108a..8fe3247b5 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -76,6 +76,20 @@ func setLinux(s *Spec) { } } +// nolint +func setResources(s *Spec) { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + } +} + // setCapabilities sets Linux Capabilities to empty if unset func setCapabilities(s *Spec) { setProcess(s) @@ -1139,3 +1153,39 @@ func WithAnnotations(annotations map[string]string) SpecOpts { return nil } } + +// WithLinuxDevices adds the provided linux devices to the spec +func WithLinuxDevices(devices []specs.LinuxDevice) 
SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Devices = append(s.Linux.Devices, devices...) + return nil + } +} + +var ErrNotADevice = errors.New("not a device node") + +// WithLinuxDevice adds the device specified by path to the spec +func WithLinuxDevice(path, permissions string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + setResources(s) + + dev, err := deviceFromPath(path, permissions) + if err != nil { + return err + } + + s.Linux.Devices = append(s.Linux.Devices, *dev) + + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ + Type: dev.Type, + Allow: true, + Major: &dev.Major, + Minor: &dev.Minor, + Access: permissions, + }) + + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go new file mode 100644 index 000000000..d4b95e7b1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -0,0 +1,63 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = stat.Rdev + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go new file mode 100644 index 000000000..3f63dfd16 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -0,0 +1,63 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index fbe1cb33c..d265d544d 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -23,6 +23,7 @@ import ( "github.com/containerd/containerd/containers" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the @@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { return nil } } + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + return nil, errors.New("device from path not supported on Windows") +} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go rename to vendor/github.com/containerd/containerd/pkg/process/deleted_state.go index fe9d7bf55..95ad138e0 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/deleted_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/deleted_state.go @@ -16,14 +16,13 @@ limitations under the License. */ -package proc +package process import ( "context" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) @@ -67,6 +66,6 @@ func (s *deletedState) SetExited(status int) { // no op } -func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a deleted state") } diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go b/vendor/github.com/containerd/containerd/pkg/process/exec.go similarity index 91% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go rename to vendor/github.com/containerd/containerd/pkg/process/exec.go index 5ab232ae7..4175dcd5a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec.go @@ -16,7 +16,7 @@ limitations under the License. 
*/ -package proc +package process import ( "context" @@ -31,7 +31,8 @@ import ( "golang.org/x/sys/unix" "github.com/containerd/console" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -49,10 +50,10 @@ type execProcess struct { io *processIO status int exited time.Time - pid *safePid + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio path string spec specs.Process @@ -95,6 +96,7 @@ func (e *execProcess) setExited(status int) { e.status = status e.exited = time.Now() e.parent.Platform.ShutdownConsole(context.Background(), e.console) + e.pid.set(StoppedPID) close(e.waitBlock) } @@ -106,7 +108,7 @@ func (e *execProcess) Delete(ctx context.Context) error { } func (e *execProcess) delete(ctx context.Context) error { - e.wg.Wait() + waitTimeout(ctx, &e.wg, 2*time.Second) if e.io != nil { for _, c := range e.closers { c.Close() @@ -142,7 +144,12 @@ func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error { func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { pid := e.pid.get() - if pid != 0 { + switch { + case pid == 0: + return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created") + case pid < 0: + return errors.Wrapf(errdefs.ErrNotFound, "process already finished") + default: if err := unix.Kill(pid, syscall.Signal(sig)); err != nil { return errors.Wrapf(checkKillError(err), "exec kill error") } @@ -154,7 +161,7 @@ func (e *execProcess) Stdin() io.Closer { return e.stdin } -func (e *execProcess) Stdio() proc.Stdio { +func (e *execProcess) Stdio() stdio.Stdio { return e.stdio } @@ -254,10 +261,13 @@ func (e *execProcess) Status(ctx context.Context) (string, error) { } e.mu.Lock() defer e.mu.Unlock() - // if we don't have a pid then the exec process has just been created + // if we don't have a pid(pid=0) then the exec process has just been created if e.pid.get() == 0 { return "created", nil } + if e.pid.get() == StoppedPID { + return "stopped", nil + } // if we have a pid and it can be signaled, the process is running if err := unix.Kill(e.pid.get(), 0); err == nil { return "running", nil diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go rename to vendor/github.com/containerd/containerd/pkg/process/exec_state.go index 12489501b..a8b44bb8b 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/exec_state.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go b/vendor/github.com/containerd/containerd/pkg/process/init.go similarity index 95% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go rename to vendor/github.com/containerd/containerd/pkg/process/init.go index c6a09575a..7861bdd8b 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init.go @@ -16,7 +16,7 @@ limitations under the License. 
*/ -package proc +package process import ( "context" @@ -33,7 +33,7 @@ import ( "github.com/containerd/console" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" @@ -59,15 +59,15 @@ type Init struct { id string Bundle string console console.Console - Platform proc.Platform + Platform stdio.Platform io *processIO runtime *runc.Runc status int exited time.Time - pid int + pid safePid closers []io.Closer stdin io.Closer - stdio proc.Stdio + stdio stdio.Stdio Rootfs string IoUID int IoGID int @@ -93,7 +93,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru } // New returns a new process -func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init { +func New(id string, runtime *runc.Runc, stdio stdio.Stdio) *Init { p := &Init{ id: id, runtime: runtime, @@ -113,6 +113,9 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { pio *processIO pidFile = newPidFile(p.Bundle) ) + p.pid.Lock() + defer p.pid.Unlock() + if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { return errors.Wrap(err, "failed to create OCI runtime console socket") @@ -167,7 +170,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return nil } @@ -213,7 +216,7 @@ func (p *Init) ID() string { // Pid of the process func (p *Init) Pid() int { - return p.pid + return p.pid.get() } // ExitStatus of the process @@ -272,6 +275,7 @@ func (p *Init) setExited(status int) { p.exited = time.Now() p.status = status p.Platform.ShutdownConsole(context.Background(), p.console) + p.pid.set(StoppedPID) close(p.waitBlock) } @@ -284,7 +288,7 @@ func (p *Init) Delete(ctx context.Context) error { } func (p *Init) delete(ctx context.Context) error { - p.wg.Wait() + waitTimeout(ctx, &p.wg, 2*time.Second) err := p.runtime.Delete(ctx, p.id, nil) // ignore errors if a runtime has already deleted the process // but we still hold metadata and pipes @@ -377,7 +381,7 @@ func (p *Init) Runtime() *runc.Runc { } // Exec returns a new child process -func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { p.mu.Lock() defer p.mu.Unlock() @@ -385,7 +389,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce } // exec returns a new exec'd process -func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { // process exec request var spec specs.Process if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { @@ -398,14 +402,13 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce path: path, parent: p, spec: spec, - stdio: proc.Stdio{ + stdio: stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, Terminal: r.Terminal, }, waitBlock: make(chan struct{}), - pid: &safePid{}, } e.execState = &execCreatedState{p: e} return e, nil @@ -465,7 +468,7 @@ func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error { } // Stdio of the process -func (p *Init) Stdio() proc.Stdio { +func (p *Init) 
Stdio() stdio.Stdio { return p.stdio } @@ -485,7 +488,7 @@ func (p *Init) runtimeError(rErr error, msg string) error { } } -func withConditionalIO(c proc.Stdio) runc.IOOpt { +func withConditionalIO(c stdio.Stdio) runc.IOOpt { return func(o *runc.IOOption) { o.OpenStdin = c.Stdin != "" o.OpenStdout = c.Stdout != "" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go b/vendor/github.com/containerd/containerd/pkg/process/init_state.go similarity index 96% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go rename to vendor/github.com/containerd/containerd/pkg/process/init_state.go index 0d1c8dcf2..9ec1d17be 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go +++ b/vendor/github.com/containerd/containerd/pkg/process/init_state.go @@ -16,12 +16,11 @@ limitations under the License. */ -package proc +package process import ( "context" - "github.com/containerd/containerd/runtime/proc" runc "github.com/containerd/go-runc" google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" @@ -35,7 +34,7 @@ type initState interface { Resume(context.Context) error Update(context.Context, *google_protobuf.Any) error Checkpoint(context.Context, *CheckpointConfig) error - Exec(context.Context, string, *ExecConfig) (proc.Process, error) + Exec(context.Context, string, *ExecConfig) (Process, error) Kill(context.Context, uint32, bool) error SetExited(int) } @@ -100,7 +99,7 @@ func (s *createdState) SetExited(status int) { } } -func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -143,6 +142,9 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { p := s.p sio := p.stdio + p.pid.Lock() + defer p.pid.Unlock() + var ( err error socket *runc.Socket @@ -182,7 +184,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { if err != nil { return errors.Wrap(err, "failed to retrieve OCI runtime container pid") } - p.pid = pid + p.pid.pid = pid return s.transition("running") } @@ -205,7 +207,7 @@ func (s *createdCheckpointState) SetExited(status int) { } } -func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a created state") } @@ -265,7 +267,7 @@ func (s *runningState) SetExited(status int) { } } -func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return s.p.exec(ctx, path, r) } @@ -329,7 +331,7 @@ func (s *pausedState) SetExited(status int) { } } -func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a paused state") } @@ -382,6 +384,6 @@ func (s *stoppedState) SetExited(status int) { // no op } -func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { +func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { return nil, errors.Errorf("cannot exec in a stopped state") } diff --git 
a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go b/vendor/github.com/containerd/containerd/pkg/process/io.go similarity index 89% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go rename to vendor/github.com/containerd/containerd/pkg/process/io.go index 0096db716..169f6c8e2 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go +++ b/vendor/github.com/containerd/containerd/pkg/process/io.go @@ -16,7 +16,7 @@ limitations under the License. */ -package proc +package process import ( "context" @@ -32,7 +32,7 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" "github.com/pkg/errors" @@ -50,7 +50,7 @@ type processIO struct { uri *url.URL copy bool - stdio proc.Stdio + stdio stdio.Stdio } func (p *processIO) Close() error { @@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error { return nil } -func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) { +func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdio) (*processIO, error) { pio := &processIO{ stdio: stdio, } @@ -101,17 +101,20 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) case "binary": - pio.io, err = newBinaryIO(ctx, id, u) + pio.io, err = NewBinaryIO(ctx, id, u) case "file": - if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil { + filePath := u.Path + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { return nil, err } var f *os.File - f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return nil, err } f.Close() + pio.stdio.Stdout = filePath + pio.stdio.Stderr = filePath pio.copy = true pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) default: @@ -179,10 +182,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w ) if ok { if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening w/o fifo %q failed", i.name) } if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name) } } else { if sameFile != nil { @@ -191,7 +194,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w continue } if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name) } if stdout == stderr { sameFile = &countingWriteCloser{ @@ -251,7 +254,8 @@ func isFifo(path string) (bool, error) { return false, nil } -func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { +// NewBinaryIO runs a custom binary process for pluggable shim logging +func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) { ns, err := 
namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -264,7 +268,7 @@ func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) } } ctx, cancel := context.WithCancel(ctx) - cmd := exec.CommandContext(ctx, uri.Host, args...) + cmd := exec.CommandContext(ctx, uri.Path, args...) cmd.Env = append(cmd.Env, "CONTAINER_ID="+id, "CONTAINER_NAMESPACE="+ns, diff --git a/vendor/github.com/containerd/containerd/runtime/proc/proc.go b/vendor/github.com/containerd/containerd/pkg/process/process.go similarity index 70% rename from vendor/github.com/containerd/containerd/runtime/proc/proc.go rename to vendor/github.com/containerd/containerd/pkg/process/process.go index 0e8d21b74..7cebb9b30 100644 --- a/vendor/github.com/containerd/containerd/runtime/proc/proc.go +++ b/vendor/github.com/containerd/containerd/pkg/process/process.go @@ -14,30 +14,17 @@ limitations under the License. */ -package proc +package process import ( "context" "io" - "sync" "time" "github.com/containerd/console" + "github.com/containerd/containerd/pkg/stdio" ) -// Stdio of a process -type Stdio struct { - Stdin string - Stdout string - Stderr string - Terminal bool -} - -// IsNull returns true if the stdio is not defined -func (s Stdio) IsNull() bool { - return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" -} - // Process on a system type Process interface { // ID returns the id for the process @@ -51,7 +38,7 @@ type Process interface { // Stdin returns the process STDIN Stdin() io.Closer // Stdio returns io information for the container - Stdio() Stdio + Stdio() stdio.Stdio // Status returns the process status Status(context.Context) (string, error) // Wait blocks until the process has exited @@ -67,12 +54,3 @@ type Process interface { // SetExited sets the exit status for the process SetExited(status int) } - -// Platform handles platform-specific behavior that may differs across -// platform implementations -type Platform interface { - CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, - wg *sync.WaitGroup) (console.Console, error) - ShutdownConsole(ctx context.Context, console console.Console) error - Close() error -} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go b/vendor/github.com/containerd/containerd/pkg/process/types.go similarity index 99% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go rename to vendor/github.com/containerd/containerd/pkg/process/types.go index 5d705c030..03477038a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/types.go +++ b/vendor/github.com/containerd/containerd/pkg/process/types.go @@ -14,7 +14,7 @@ limitations under the License. */ -package proc +package process import ( google_protobuf "github.com/gogo/protobuf/types" diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go b/vendor/github.com/containerd/containerd/pkg/process/utils.go similarity index 70% rename from vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go rename to vendor/github.com/containerd/containerd/pkg/process/utils.go index 2d085e62f..b0ac6333c 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go +++ b/vendor/github.com/containerd/containerd/pkg/process/utils.go @@ -16,9 +16,10 @@ limitations under the License. 
*/ -package proc +package process import ( + "context" "encoding/json" "fmt" "io" @@ -34,6 +35,15 @@ import ( "golang.org/x/sys/unix" ) +const ( + // RuncRoot is the path to the root runc state directory + RuncRoot = "/run/containerd/runc" + // StoppedPID is the pid assigned after a container has run and stopped + StoppedPID = -1 + // InitPidFile name of the file that contains the init pid + InitPidFile = "init.pid" +) + // safePid is a thread safe wrapper for pid. type safePid struct { sync.Mutex @@ -46,6 +56,12 @@ func (s *safePid) get() int { return s.pid } +func (s *safePid) set(pid int) { + s.Lock() + s.pid = pid + s.Unlock() +} + // TODO(mlaventure): move to runc package? func getLastRuntimeError(r *runc.Runc) (string, error) { if r.Log == "" { @@ -117,9 +133,6 @@ func checkKillError(err error) error { return errors.Wrapf(err, "unknown error after kill") } -// InitPidFile name of the file that contains the init pid -const InitPidFile = "init.pid" - func newPidFile(bundle string) *pidFile { return &pidFile{ path: filepath.Join(bundle, InitPidFile), @@ -143,3 +156,37 @@ func (p *pidFile) Path() string { func (p *pidFile) Read() (int, error) { return runc.ReadPidFile(p.path) } + +// waitTimeout handles waiting on a waitgroup with a specified timeout. +// this is commonly used for waiting on IO to finish after a process has exited +func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + done := make(chan struct{}, 1) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func stateName(v interface{}) string { + switch v.(type) { + case *runningState, *execRunningState: + return "running" + case *createdState, *execCreatedState, *createdCheckpointState: + return "created" + case *pausedState: + return "paused" + case *deletedState: + return "deleted" + case *stoppedState: + return "stopped" + } + panic(errors.Errorf("invalid state %v", v)) +} diff --git a/vendor/github.com/containerd/containerd/pkg/stdio/platform.go b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go new file mode 100644 index 000000000..6e1b27cfa --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/stdio/platform.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package stdio + +import ( + "context" + "sync" + + "github.com/containerd/console" +) + +// Platform handles platform-specific behavior that may differs across +// platform implementations +type Platform interface { + CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, + wg *sync.WaitGroup) (console.Console, error) + ShutdownConsole(ctx context.Context, console console.Console) error + Close() error +} diff --git a/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go new file mode 100644 index 000000000..b02e77dcd --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/stdio/stdio.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package stdio + +// Stdio of a process +type Stdio struct { + Stdin string + Stdout string + Stderr string + Terminal bool +} + +// IsNull returns true if the stdio is not defined +func (s Stdio) IsNull() bool { + return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go index 1fc4fc164..b8c69ef0a 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go @@ -19,6 +19,7 @@ package ttrpcutil import ( + "context" "net" "os" "time" @@ -28,33 +29,31 @@ import ( ) func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) { - var c net.Conn - var lastError error - timedOutError := errors.Errorf("timed out waiting for npipe %s", address) - start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // If there is nobody serving the pipe we limit the timeout for this case to + // 5 seconds because any shim that would serve this endpoint should serve it + // within 5 seconds. + serveTimer := time.NewTimer(5 * time.Second) + defer serveTimer.Stop() for { - remaining := timeout - time.Since(start) - if remaining <= 0 { - lastError = timedOutError - break + c, err := winio.DialPipeContext(ctx, address) + if err != nil { + if os.IsNotExist(err) { + select { + case <-serveTimer.C: + return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + default: + // Wait 10ms for the shim to serve and try again. + time.Sleep(10 * time.Millisecond) + continue + } + } else if err == context.DeadlineExceeded { + return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + } + return nil, err } - c, lastError = winio.DialPipe(address, &remaining) - if lastError == nil { - break - } - if !os.IsNotExist(lastError) { - break - } - // There is nobody serving the pipe. We limit the timeout for this case - // to 5 seconds because any shim that would serve this endpoint should - // serve it within 5 seconds. 
We use the passed in timeout for the - // `DialPipe` timeout if the pipe exists however to give the pipe time - // to `Accept` the connection. - if time.Since(start) >= 5*time.Second { - lastError = timedOutError - break - } - time.Sleep(10 * time.Millisecond) + return c, nil } - return c, lastError } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 3097cc3b5..9652d3ac1 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -31,7 +31,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/version" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context/ctxhttp" @@ -41,128 +40,278 @@ type dockerAuthorizer struct { credentials func(string) (string, string, error) client *http.Client - ua string + header http.Header mu sync.Mutex - auth map[string]string + // indexed by host name + handlers map[string]*authHandler } // NewAuthorizer creates a Docker authorizer using the provided function to // get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - if client == nil { - client = http.DefaultClient - } - return &dockerAuthorizer{ - credentials: f, - client: client, - ua: "containerd/" + version.Version, - auth: map[string]string{}, + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client } } -func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - // TODO: Lookup matching challenge and scope rather than just host - if auth := a.getAuth(req.URL.Host); auth != "" { - req.Header.Set("Authorization", auth) +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. +// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) } + if ao.client == nil { + ao.client = http.DefaultClient + } + + return &dockerAuthorizer{ + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + } +} + +// Authorize handles auth request. 
+func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil + } + + auth, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) return nil } +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { last := responses[len(responses)-1] host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() for _, c := range parseAuthHeader(last.Header) { if c.scheme == bearerAuth { if err := invalidAuthorization(c, responses); err != nil { - // TODO: Clear token - a.setAuth(host, "") + delete(a.handlers, host) return err } - // TODO(dmcg): Store challenge, not token - // Move token fetching to authorize - return a.setTokenAuth(ctx, host, c.parameters) + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. + if _, ok := a.handlers[host]; ok { + return nil + } + + common, err := a.generateTokenOptions(ctx, host, c) + if err != nil { + return err + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + return nil } else if c.scheme == basicAuth && a.credentials != nil { - // TODO: Resolve credentials on authorize username, secret, err := a.credentials(host) if err != nil { return err } + if username != "" && secret != "" { - auth := username + ":" + secret - a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth)))) + common := tokenOptions{ + username: username, + secret: secret, + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) return nil } } } - return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") } -func (a *dockerAuthorizer) getAuth(host string) string { - a.mu.Lock() - defer a.mu.Unlock() - - return a.auth[host] -} - -func (a *dockerAuthorizer) setAuth(host string, auth string) bool { - a.mu.Lock() - defer a.mu.Unlock() - - changed := a.auth[host] != auth - a.auth[host] = auth - - return changed -} - -func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error { - realm, ok := params["realm"] +func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) { + realm, ok := c.parameters["realm"] if !ok { - return errors.New("no realm specified for token auth challenge") + return tokenOptions{}, errors.New("no realm specified for token auth challenge") } realmURL, err := url.Parse(realm) if err != nil { - return errors.Wrap(err, "invalid token auth challenge realm") + return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") } to := tokenOptions{ realm: realmURL.String(), - service: params["service"], + service: c.parameters["service"], } - to.scopes = getTokenScopes(ctx, params) - if len(to.scopes) == 0 { - return errors.Errorf("no scope specified for token auth challenge") + scope, ok := c.parameters["scope"] + if !ok { + return tokenOptions{}, errors.Errorf("no scope specified for token auth challenge") } + to.scopes = append(to.scopes, scope) if a.credentials != nil { to.username, to.secret, err = a.credentials(host) if err 
!= nil { - return err + return tokenOptions{}, err } } + return to, nil +} - var token string +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error +} + +// authHandler is used to handle auth request per registry server. +type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme authenticationScheme + + // common contains common challenge answer + common tokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, error) { + switch ah.scheme { + case basicAuth: + return ah.doBasicAuth(ctx) + case bearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.username, ah.common.secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { + // copy common tokenOptions + to := ah.common + + to.scopes = getTokenScopes(ctx, to.scopes) + if len(to.scopes) == 0 { + return "", errors.Errorf("no scope specified for token auth challenge") + } + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + // fetch token for the resource scope + var ( + token string + err error + ) if to.secret != "" { - // Credential information is provided, use oauth POST endpoint - token, err = a.fetchTokenWithOAuth(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch oauth token") - } + // credential information is provided, use oauth POST endpoint + token, err = ah.fetchTokenWithOAuth(ctx, to) + err = errors.Wrap(err, "failed to fetch oauth token") } else { - // Do request anonymously - token, err = a.fetchToken(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch anonymous token") - } + // do request anonymously + token, err = ah.fetchToken(ctx, to) + err = errors.Wrap(err, "failed to fetch anonymous token") } - a.setAuth(host, fmt.Sprintf("Bearer %s", token)) + token = fmt.Sprintf("Bearer %s", token) - return nil + r.token, r.err = token, err + r.Done() + return r.token, r.err } type tokenOptions struct { @@ -181,7 +330,7 @@ type postTokenResponse struct { Scope string `json:"scope"` } -func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { +func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { form := url.Values{} form.Set("scope", strings.Join(to.scopes, " ")) form.Set("service", to.service) @@ -202,11 
+351,13 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti return "", err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - if a.ua != "" { - req.Header.Set("User-Agent", a.ua) + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } } - resp, err := ctxhttp.Do(ctx, a.client, req) + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } @@ -216,7 +367,7 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return a.fetchToken(ctx, to) + return ah.fetchToken(ctx, to) } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB log.G(ctx).WithFields(logrus.Fields{ @@ -245,15 +396,17 @@ type getTokenResponse struct { RefreshToken string `json:"refresh_token"` } -// getToken fetches a token using a GET request -func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) { +// fetchToken fetches a token using a GET request +func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { req, err := http.NewRequest("GET", to.realm, nil) if err != nil { return "", err } - if a.ua != "" { - req.Header.Set("User-Agent", a.ua) + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } } reqParams := req.URL.Query() @@ -272,7 +425,7 @@ func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (str req.URL.RawQuery = reqParams.Encode() - resp, err := ctxhttp.Do(ctx, a.client, req) + resp, err := ctxhttp.Do(ctx, ah.client, req) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 6f06b0e50..ce3da5524 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -23,7 +23,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "github.com/containerd/containerd/errdefs" @@ -32,7 +32,6 @@ import ( "github.com/docker/distribution/registry/api/errcode" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type dockerFetcher struct { @@ -40,26 +39,46 @@ type dockerFetcher struct { } func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields( - logrus.Fields{ - "base": r.base.String(), - "digest": desc.Digest, - }, - )) + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) - urls, err := r.getV2URLPaths(ctx, desc) - if err != nil { - return nil, err + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") } - ctx, err = contextWithRepositoryScope(ctx, r.refspec, false) + ctx, err := contextWithRepositoryScope(ctx, r.refspec, false) if err != nil { return nil, err } return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - for _, u := range urls { - rc, err := r.open(ctx, u, desc.MediaType, offset) + // firstly try fetch via external 
urls + for _, us := range desc.URLs { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) if err != nil { if errdefs.IsNotFound(err) { continue // try one of the other urls. @@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return rc, nil } + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + } + + // Finally use blobs endpoints + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "could not fetch content descriptor %v (%v) from remote", desc.Digest, desc.MediaType) @@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } -func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) { - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints // will return the header without supporting the range. The content // range must always be checked. 
- req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } - resp, err := r.doRequestWithRetries(ctx, req, nil) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return nil, err } @@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) } var registryErr errcode.Errors if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error()) + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) } if offset > 0 { cr := resp.Header.Get("content-range") @@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int return resp.Body, nil } - -// getV2URLPaths generates the candidate urls paths for the object based on the -// set of hints and the provided object id. URLs are returned in the order of -// most to least likely succeed. -func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) { - var urls []string - - if len(desc.URLs) > 0 { - // handle fetch via external urls. - for _, u := range desc.URLs { - log.G(ctx).WithField("url", u).Debug("adding alternative url") - urls = append(urls, u) - } - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - urls = append(urls, r.url(path.Join("manifests", desc.Digest.String()))) - } - - // always fallback to attempting to get the object out of the blobs store. - urls = append(urls, r.url(path.Join("blobs", desc.Digest.String()))) - - return urls, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go index 1a355783b..529cfbc27 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -110,3 +110,45 @@ func appendDistributionSourceLabel(originLabel, repo string) string { func distributionSourceLabelKey(source string) string { return fmt.Sprintf("%s.%s", labelDistributionSource, source) } + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. 
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index c3c0923f0..7100e11c6 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -21,7 +21,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "time" @@ -37,7 +37,7 @@ import ( type dockerPusher struct { *dockerBase - tag string + object string // TODO: namespace tracker tracker StatusTracker @@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Wrap(err, "failed to get status") } + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + var ( isManifest bool - existCheck string + existCheck []string + host = hosts[0] ) switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: isManifest = true - if p.tag == "" { - existCheck = path.Join("manifests", desc.Digest.String()) - } else { - existCheck = path.Join("manifests", p.tag) - } + existCheck = getManifestPath(p.object, desc.Digest) default: - existCheck = path.Join("blobs", desc.Digest.String()) + existCheck = []string{"blobs", desc.Digest.String()} } - req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil) - if err != nil { - return nil, err - } + req := p.request(host, http.MethodHead, existCheck...) 
+ req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - resp, err := p.doRequestWithRetries(ctx, req, nil) + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) if err != nil { if errors.Cause(err) != ErrInvalidAuthorization { return nil, err @@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } else { if resp.StatusCode == http.StatusOK { var exists bool - if isManifest && p.tag != "" { + if isManifest && existCheck[1] != desc.Digest.String() { dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) if dgstHeader == desc.Digest { exists = true @@ -116,67 +117,95 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } } - // TODO: Lookup related objects for cross repository push - if isManifest { - var putPath string - if p.tag != "" { - putPath = path.Join("manifests", p.tag) - } else { - putPath = path.Join("manifests", desc.Digest.String()) - } - - req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil) - if err != nil { - return nil, err - } - req.Header.Add("Content-Type", desc.MediaType) + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) } else { - // TODO: Do monolithic upload if size is small - // Start upload request - req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil) - if err != nil { - return nil, err + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. 
+ resp, err = preq.do(pctx) + //resp, err = p.doRequest(pctx, req) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + } } - resp, err := p.doRequestWithRetries(ctx, req, nil) - if err != nil { - return nil, err + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } } switch resp.StatusCode { case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) default: // TODO: log error return nil, errors.Errorf("unexpected response: %s", resp.Status) } - location := resp.Header.Get("Location") + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) // Support paths without host in location if strings.HasPrefix(location, "/") { - // Support location string containing path and query - qmIndex := strings.Index(location, "?") - if qmIndex > 0 { - u := p.base - u.Path = location[:qmIndex] - u.RawQuery = location[qmIndex+1:] - location = u.String() - } else { - u := p.base - u.Path = location - location = u.String() + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil } } - - req, err = http.NewRequest(http.MethodPut, location, nil) - if err != nil { - return nil, err - } - q := req.URL.Query() + q := lurl.Query() q.Add("digest", desc.Digest.String()) - req.URL.RawQuery = q.Encode() + req = p.request(lhost, http.MethodPut) + req.path = lurl.Path + "?" 
+ q.Encode() } p.tracker.SetStatus(ref, Status{ Status: content.Status{ @@ -191,13 +220,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten pr, pw := io.Pipe() respC := make(chan *http.Response, 1) + body := ioutil.NopCloser(pr) - req.Body = ioutil.NopCloser(pr) - req.ContentLength = desc.Size + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size go func() { defer close(respC) - resp, err = p.doRequest(ctx, req) + resp, err = req.do(ctx) if err != nil { pr.CloseWithError(err) return @@ -223,6 +261,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten }, nil } +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + type pushWriter struct { base *dockerBase ref string @@ -296,7 +353,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di } if size > 0 && size != status.Offset { - return errors.Errorf("unxpected size %d, expected %d", status.Offset, size) + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) } if expected == "" { @@ -320,3 +377,16 @@ func (pw *pushWriter) Truncate(size int64) error { // TODO: always error on manifest return errors.New("cannot truncate remote upload") } + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 000000000..ae24f41e1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). 
A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. 
+// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +func MatchLocalhost(host string) (bool, error) { + for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} { + if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') { + return true, nil + } + } + return host == "::1", nil + +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 45282fb51..8e65d8ccc 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,9 +18,10 @@ package docker import ( "context" + "fmt" "io" + "io/ioutil" "net/http" - "net/url" "path" "strings" @@ -46,6 +47,19 @@ var ( // ErrInvalidAuthorization is used when credentials are passed to a server but // those credentials are rejected. ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 ) // Authorizer is used to authorize HTTP requests based on 401 HTTP responses. @@ -72,31 +86,38 @@ type Authorizer interface { // ResolverOptions are used to configured a new Docker register resolver type ResolverOptions struct { - // Authorizer is used to authorize registry requests - Authorizer Authorizer - - // Credentials provides username and secret given a host. - // If username is empty but a secret is given, that secret - // is interpreted as a long lived token. - // Deprecated: use Authorizer - Credentials func(string) (string, string, error) - - // Host provides the hostname given a namespace. - Host func(string) (string, error) + // Hosts returns registry host configurations for a namespace. 
+ Hosts RegistryHosts // Headers are the HTTP request header fields sent by the resolver Headers http.Header - // PlainHTTP specifies to use plain http and not https - PlainHTTP bool - - // Client is the http client to used when making registry requests - Client *http.Client - // Tracker is used to track uploads to the registry. This is used // since the registry does not have upload tracking and the existing // mechanism for getting blob upload status is expensive. Tracker StatusTracker + + // Authorizer is used to authorize registry requests + // Deprecated: use Hosts + Authorizer Authorizer + + // Credentials provides username and secret given a host. + // If username is empty but a secret is given, that secret + // is interpreted as a long lived token. + // Deprecated: use Hosts + Credentials func(string) (string, string, error) + + // Host provides the hostname given a namespace. + // Deprecated: use Hosts + Host func(string) (string, error) + + // PlainHTTP specifies to use plain http and not https + // Deprecated: use Hosts + PlainHTTP bool + + // Client is the http client to used when making registry requests + // Deprecated: use Hosts + Client *http.Client } // DefaultHost is the default host function. @@ -108,13 +129,10 @@ func DefaultHost(ns string) (string, error) { } type dockerResolver struct { - auth Authorizer - host func(string) (string, error) - headers http.Header - uagent string - plainHTTP bool - client *http.Client - tracker StatusTracker + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker } // NewResolver returns a new resolver to a Docker registry @@ -122,39 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Tracker == nil { options.Tracker = NewInMemoryTracker() } - if options.Host == nil { - options.Host = DefaultHost - } + if options.Headers == nil { options.Headers = make(http.Header) } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} if _, ok := options.Headers["Accept"]; !ok { // set headers for all the types we support for resolution. - options.Headers.Set("Accept", strings.Join([]string{ + resolveHeader.Set("Accept", strings.Join([]string{ images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex, "*"}, ", ")) - } - ua := options.Headers.Get("User-Agent") - if ua != "" { - options.Headers.Del("User-Agent") } else { - ua = "containerd/" + version.Version + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") } - if options.Authorizer == nil { - options.Authorizer = NewAuthorizer(options.Client, options.Credentials) - options.Authorizer.(*dockerAuthorizer).ua = ua + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) 
} return &dockerResolver{ - auth: options.Authorizer, - host: options.Host, - headers: options.Headers, - uagent: ua, - plainHTTP: options.PlainHTTP, - client: options.Client, - tracker: options.Tracker, + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, } } @@ -201,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } - fetcher := dockerFetcher{ - dockerBase: base, - } - var ( - urls []string - dgst = refspec.Digest() + lastErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull ) if dgst != "" { @@ -218,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } // turns out, we have a valid digest, make a url. - urls = append(urls, fetcher.url("manifests", dgst.String())) + paths = append(paths, []string{"manifests", dgst.String()}) // fallback to blobs on not found. - urls = append(urls, fetcher.url("blobs", dgst.String())) + paths = append(paths, []string{"blobs", dgst.String()}) } else { - urls = append(urls, fetcher.url("manifests", refspec.Object)) + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") } ctx, err = contextWithRepositoryScope(ctx, refspec, false) if err != nil { return "", ocispec.Descriptor{}, err } - for _, u := range urls { - req, err := http.NewRequest(http.MethodHead, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) - log.G(ctx).Debug("resolving") - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - if errors.Cause(err) == ErrInvalidAuthorization { - err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + req := base.request(host, http.MethodHead, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) } - return "", ocispec.Descriptor{}, err - } - resp.Body.Close() // don't care about body contents. - if resp.StatusCode > 299 { - if resp.StatusCode == http.StatusNotFound { + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Cause(err) == ErrInvalidAuthorization { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } + return "", ocispec.Descriptor{}, err + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + continue + } + return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. 
+ dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if lastErr == nil { + lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } continue } - return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil } - size := resp.ContentLength - - // this is the only point at which we trust the registry. we use the - // content headers to assemble a descriptor for the name. when this becomes - // more robust, we mostly get this information from a secure trust store. 
- dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - contentType := getManifestMediaType(resp) - - if dgstHeader != "" && size != -1 { - if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) - } - dgst = dgstHeader - } else { - log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers - - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - defer resp.Body.Close() - - bodyReader := countingReader{reader: resp.Body} - - contentType = getManifestMediaType(resp) - if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - - dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - } - size = bodyReader.bytesRead - } - - desc := ocispec.Descriptor{ - Digest: dgst, - MediaType: contentType, - Size: size, - } - - log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") - return ref, desc, nil } - return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref) + if lastErr == nil { + lastErr = errors.Wrap(errdefs.ErrNotFound, ref) + } + + return "", ocispec.Descriptor{}, lastErr } func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { @@ -336,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return nil, err } - // Manifests can be pushed by digest like any other object, but the passed in - // reference cannot take a digest without the associated content. A tag is allowed - // and will be used to tag pushed manifests. 
- if refspec.Object != "" && strings.Contains(refspec.Object, "@") { - return nil, errors.New("cannot use digest reference for push locator") - } - base, err := r.base(refspec) if err != nil { return nil, err @@ -350,62 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return dockerPusher{ dockerBase: base, - tag: refspec.Object, + object: refspec.Object, tracker: r.tracker, }, nil } type dockerBase struct { - refspec reference.Spec - base url.URL - uagent string - - client *http.Client - auth Authorizer + refspec reference.Spec + namespace string + hosts []RegistryHost + header http.Header } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { - var ( - err error - base url.URL - ) - host := refspec.Hostname() - base.Host = host - if r.host != nil { - base.Host, err = r.host(host) - if err != nil { - return nil, err - } + hosts, err := r.hosts(host) + if err != nil { + return nil, err } - - base.Scheme = "https" - if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") { - base.Scheme = "http" - } - - prefix := strings.TrimPrefix(refspec.Locator, host+"/") - base.Path = path.Join("/v2", prefix) - return &dockerBase{ - refspec: refspec, - base: base, - uagent: r.uagent, - client: r.client, - auth: r.auth, + refspec: refspec, + namespace: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, }, nil } -func (r *dockerBase) url(ps ...string) string { - url := r.base - url.Path = path.Join(url.Path, path.Join(ps...)) - return url.String() +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return } -func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := http.Header{} + for key, value := range r.header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.namespace}, ps...) + p := path.Join(parts...) 
+ // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { // Check if has header for host - if r.auth != nil { - if err := r.auth.Authorize(ctx, req); err != nil { + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { return err } } @@ -413,81 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { return nil } -func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) - log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") - req.Header.Set("User-Agent", r.uagent) +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = r.header + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") if err := r.authorize(ctx, req); err != nil { return nil, errors.Wrap(err, "failed to authorize") } - resp, err := ctxhttp.Do(ctx, r.client, req) + resp, err := ctxhttp.Do(ctx, r.host.Client, req) if err != nil { return nil, errors.Wrap(err, "failed to do request") } - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "response.headers": resp.Header, - }).Debug("fetch response received") + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } -func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) { - resp, err := r.doRequest(ctx, req) +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) if err != nil { return nil, err } responses = append(responses, resp) - req, err = r.retryRequest(ctx, req, responses) + retry, err := r.retryRequest(ctx, responses) if err != nil { resp.Body.Close() return nil, err } - if req != nil { + if retry { resp.Body.Close() - return r.doRequestWithRetries(ctx, req, responses) + return r.doWithRetries(ctx, responses) } return resp, err } -func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) { +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { if len(responses) > 5 { - return nil, nil + return false, nil } last := responses[len(responses)-1] - if last.StatusCode == http.StatusUnauthorized { + switch last.StatusCode { + case http.StatusUnauthorized: log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - if r.auth != nil { - if err := r.auth.AddResponses(ctx, responses); err == nil { - return copyRequest(req) + if r.host.Authorizer != nil { + if err := 
r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil } else if !errdefs.IsNotImplemented(err) { - return nil, err + return false, err } } - return nil, nil - } else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead { + return false, nil + case http.StatusMethodNotAllowed: // Support registries which have not properly implemented the HEAD method for // manifests endpoint - if strings.Contains(req.URL.Path, "/manifests/") { - // TODO: copy request? - req.Method = http.MethodGet - return copyRequest(req) + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil } // TODO: Handle 50x errors accounting for attempt history - return nil, nil + return false, nil } -func copyRequest(req *http.Request) (*http.Request, error) { - ireq := *req - if ireq.GetBody != nil { - var err error - ireq.Body, err = ireq.GetBody() - if err != nil { - return nil, err +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v } } - return &ireq, nil + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." 
+ k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 29b41cc14..8314c01d5 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -216,12 +216,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De ref := remotes.MakeRefKey(ctx, desc) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") } ref = remotes.MakeRefKey(ctx, config) if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config") + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") } return desc, nil diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go index 52c244311..86bd81bf5 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -18,6 +18,7 @@ package docker import ( "context" + "fmt" "net/url" "sort" "strings" @@ -53,24 +54,38 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, pus return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil } -// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"]. -func getTokenScopes(ctx context.Context, params map[string]string) []string { +// contextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + var scopes []string + + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = append(scopes, v.([]string)...) + } + scopes = append(scopes, fmt.Sprintf("repository:%s:pull", repo)) + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func getTokenScopes(ctx context.Context, common []string) []string { var scopes []string if x := ctx.Value(tokenScopesKey{}); x != nil { scopes = append(scopes, x.([]string)...) } - if scope, ok := params["scope"]; ok { - for _, s := range scopes { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) - // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. - if s == scope { - // already appended - goto Sort - } - } - scopes = append(scopes, scope) - } -Sort: + + scopes = append(scopes, common...) sort.Strings(scopes) - return scopes + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. 
+ if scopes[l] == scopes[idx] { + continue + } + + l++ + scopes[l] = scopes[idx] + } + return scopes[:l+1] } diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 0ee56c887..4dee1f61c 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -48,7 +48,8 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip, + images.MediaTypeDockerSchema2LayerEnc, images.MediaTypeDockerSchema2LayerGzipEnc: return "layer-" + desc.Digest.String() case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: return "config-" + desc.Digest.String() @@ -156,7 +157,7 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. -func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex manifestStack := []ocispec.Descriptor{} @@ -173,10 +174,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr } }) - pushHandler := PushHandler(pusher, provider) + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) var handler images.Handler = images.Handlers( - images.FilterPlatforms(images.ChildrenHandler(provider), platform), + annotateHandler, filterHandler, pushHandler, ) @@ -241,3 +246,45 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) return descs, nil } } + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. 
+func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + info, err := manager.Info(ctx, child.Digest) + if err != nil { + return nil, err + } + + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index 3ea830f6b..73d4ccca5 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -48,6 +48,14 @@ type Layer struct { // Layers are applied in order they are given, making the first layer the // bottom-most layer in the layer chain. func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) { + return ApplyLayersWithOpts(ctx, layers, sn, a, nil) +} + +// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts. +// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. +func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) { chain := make([]digest.Digest, len(layers)) for i, layer := range layers { chain[i] = layer.Diff.Digest @@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) { + if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } } @@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, // using the provided snapshotter and applier. If the layer was unpacked true // is returned, if the layer already exists false is returned. func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) { + return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil) +} + +// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain, +// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true +// is returned, if the layer already exists false is returned. 
+func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) { var ( chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String() applied bool @@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil { + if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return false, err } @@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap } } return applied, nil + } -func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error { +func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error { var ( parent = identity.ChainID(chain[:len(chain)-1]) chainID = identity.ChainID(chain) @@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) if err != nil { if errdefs.IsNotFound(err) && len(layers) > 1 { - if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil { + if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return err } @@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn } }() - diff, err = a.Apply(ctx, layer.Blob, mounts) + diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) 
if err != nil { err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) return err diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go index 981e290c6..ab9017ba5 100644 --- a/vendor/github.com/containerd/containerd/runtime/task.go +++ b/vendor/github.com/containerd/containerd/runtime/task.go @@ -33,6 +33,7 @@ type TaskInfo struct { // Process is a runtime object for an executing process inside a container type Process interface { + // ID of the process ID() string // State returns the process state State(context.Context) (State, error) @@ -54,6 +55,8 @@ type Process interface { type Task interface { Process + // PID of the process + PID() uint32 // Namespace that the task exists in Namespace() string // Pause pauses the container process diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go index a85dcd918..0243c3986 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go @@ -37,12 +37,12 @@ import ( "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/containerd/containerd/runtime/v1" - "github.com/containerd/containerd/runtime/v1/linux/proc" + v1 "github.com/containerd/containerd/runtime/v1" shim "github.com/containerd/containerd/runtime/v1/shim/v1" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" @@ -335,7 +335,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { filepath.Join(r.root, ns, id), ) ctx = namespaces.WithNamespace(ctx, ns) - pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) shimExit := make(chan struct{}) s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() { defer close(shimExit) @@ -422,7 +422,7 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, "namespace": ns, }).Warn("cleaning up after shim dead") - pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile)) ctx = namespaces.WithNamespace(ctx, ns) if err := r.terminate(ctx, bundle, ns, id); err != nil { if r.config.ShimDebug { @@ -487,7 +487,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er var ( cmd = r.config.Runtime - root = proc.RuncRoot + root = process.RuncRoot ) if ropts != nil { if ropts.Runtime != "" { diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go index e13255e95..0970c3ea3 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/task.go @@ -84,6 +84,11 @@ func (t *Task) Namespace() string { return t.namespace } +// PID of the task +func (t *Task) PID() uint32 { + return uint32(t.pid) +} + // Delete the task and return the exit status func (t *Task) Delete(ctx context.Context) (*runtime.Exit, error) { 
rsp, err := t.shim.Delete(ctx, empty) @@ -124,11 +129,15 @@ func (t *Task) Start(ctx context.Context) error { t.pid = int(r.Pid) if !hasCgroup { cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid)) - if err != nil { + if err != nil && err != cgroups.ErrCgroupDeleted { return err } t.mu.Lock() - t.cg = cg + if err == cgroups.ErrCgroupDeleted { + t.cg = nil + } else { + t.cg = cg + } t.mu.Unlock() } t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{ diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go index 9b4b77c37..7c68248c5 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go @@ -127,8 +127,8 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa "address": address, }).Infof("shim placed in cgroup %s", cgroup) } - if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil { - return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim") + if err = setupOOMScore(cmd.Process.Pid); err != nil { + return nil, nil, err } c, clo, err := WithConnect(address, func() {})(ctx, config) if err != nil { @@ -138,6 +138,21 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa } } +// setupOOMScore gets containerd's oom score and adds +1 to it +// to ensure a shim has a lower* score than the daemons +func setupOOMScore(shimPid int) error { + pid := os.Getpid() + score, err := sys.GetOOMScoreAdj(pid) + if err != nil { + return errors.Wrap(err, "get daemon OOM score") + } + shimScore := score + 1 + if err := sys.SetOOMScore(shimPid, shimScore); err != nil { + return errors.Wrap(err, "set shim OOM score") + } + return nil +} + func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) { selfExe, err := os.Executable() if err != nil { @@ -283,7 +298,7 @@ func (c *Client) KillShim(ctx context.Context) error { return c.signalShim(ctx, unix.SIGKILL) } -// Close the cient connection +// Close the client connection func (c *Client) Close() error { if c.c == nil { return nil diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go index 6e87f052a..f55700135 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/service.go @@ -35,10 +35,10 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/process" + "github.com/containerd/containerd/pkg/stdio" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - rproc "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/containerd/runtime/v1/linux/proc" shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" @@ -84,7 +84,7 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { s := &Service{ config: config, context: ctx, - processes: make(map[string]rproc.Process), + processes: make(map[string]process.Process), events: make(chan interface{}, 128), ec: Default.Subscribe(), } @@ -102,9 +102,9 @@ type 
Service struct { config Config context context.Context - processes map[string]rproc.Process + processes map[string]process.Process events chan interface{} - platform rproc.Platform + platform stdio.Platform ec chan runc.Exit // Filled by Create() @@ -114,9 +114,9 @@ type Service struct { // Create a new initial process and container with the underlying OCI runtime func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { - var mounts []proc.Mount + var mounts []process.Mount for _, m := range r.Rootfs { - mounts = append(mounts, proc.Mount{ + mounts = append(mounts, process.Mount{ Type: m.Type, Source: m.Source, Target: m.Target, @@ -132,7 +132,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * } } - config := &proc.CreateConfig{ + config := &process.CreateConfig{ ID: r.ID, Bundle: r.Bundle, Runtime: r.Runtime, @@ -266,7 +266,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + process, err := p.(*process.Init).Exec(ctx, s.config.Path, &process.ExecConfig{ ID: r.ID, Terminal: r.Terminal, Stdin: r.Stdin, @@ -348,7 +348,7 @@ func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, er if err != nil { return nil, err } - if err := p.(*proc.Init).Pause(ctx); err != nil { + if err := p.(*process.Init).Pause(ctx); err != nil { return nil, err } return empty, nil @@ -360,7 +360,7 @@ func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, e if err != nil { return nil, err } - if err := p.(*proc.Init).Resume(ctx); err != nil { + if err := p.(*process.Init).Resume(ctx); err != nil { return nil, err } return empty, nil @@ -448,7 +448,7 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque } options = *v.(*runctypes.CheckpointOptions) } - if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{ + if err := p.(*process.Init).Checkpoint(ctx, &process.CheckpointConfig{ Path: r.Path, Exit: options.Exit, AllowOpenTCP: options.OpenTcp, @@ -476,7 +476,7 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*pt if err != nil { return nil, err } - if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil { + if err := p.(*process.Init).Update(ctx, r.Resources); err != nil { return nil, errdefs.ToGRPC(err) } return empty, nil @@ -502,11 +502,11 @@ func (s *Service) processExits() { } } -func (s *Service) allProcesses() []rproc.Process { +func (s *Service) allProcesses() []process.Process { s.mu.Lock() defer s.mu.Unlock() - res := make([]rproc.Process, 0, len(s.processes)) + res := make([]process.Process, 0, len(s.processes)) for _, p := range s.processes { res = append(res, p) } @@ -523,7 +523,7 @@ func (s *Service) checkProcesses(e runc.Exit) { if p.Pid() == e.Pid { if shouldKillAll { - if ip, ok := p.(*proc.Init); ok { + if ip, ok := p.(*process.Init); ok { // Ensure all children are killed if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). 
@@ -569,7 +569,7 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er return nil, err } - ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + ps, err := p.(*process.Init).Runtime().Ps(ctx, id) if err != nil { return nil, err } @@ -589,7 +589,7 @@ func (s *Service) forward(publisher events.Publisher) { } // getInitProcess returns initial process -func (s *Service) getInitProcess() (rproc.Process, error) { +func (s *Service) getInitProcess() (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -601,7 +601,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) { } // getExecProcess returns exec process -func (s *Service) getExecProcess(id string) (rproc.Process, error) { +func (s *Service) getExecProcess(id string) (process.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -640,7 +640,7 @@ func getTopic(ctx context.Context, e interface{}) string { return runtime.TaskUnknownTopic } -func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform rproc.Platform, r *proc.CreateConfig, rootfs string) (*proc.Init, error) { +func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform stdio.Platform, r *process.CreateConfig, rootfs string) (*process.Init, error) { var options runctypes.CreateOptions if r.Options != nil { v, err := typeurl.UnmarshalAny(r.Options) @@ -650,8 +650,8 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu st options = *v.(*runctypes.CreateOptions) } - runtime := proc.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) - p := proc.New(r.ID, runtime, rproc.Stdio{ + runtime := process.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) + p := process.New(r.ID, runtime, stdio.Stdio{ Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, diff --git a/vendor/github.com/containerd/containerd/runtime/v2/binary.go b/vendor/github.com/containerd/containerd/runtime/v2/binary.go index 2087509ba..22869a25c 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/binary.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/binary.go @@ -87,7 +87,7 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_ defer f.Close() if _, err := io.Copy(os.Stderr, f); err != nil { // When using a multi-container shim the 2nd to Nth container in the - // shim will not have a seperate log pipe. Ignore the failure log + // shim will not have a separate log pipe. Ignore the failure log // message here when the shim connect times out. if !os.IsNotExist(errors.Cause(err)) { log.G(ctx).WithError(err).Error("copy shim log") diff --git a/vendor/github.com/containerd/containerd/runtime/v2/manager.go b/vendor/github.com/containerd/containerd/runtime/v2/manager.go index 0ef854e89..481a44033 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/manager.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/manager.go @@ -140,6 +140,11 @@ func (m *TaskManager) Create(ctx context.Context, id string, opts runtime.Create return } cleanupAfterDeadShim(context.Background(), id, ns, m.events, b) + // Remove self from the runtime task list. Even though the cleanupAfterDeadShim() + // would publish taskExit event, but the shim.Delete() would always failed with ttrpc + // disconnect and there is no chance to remove this dead task from runtime task lists. + // Thus it's better to delete it here. 
+ m.tasks.Delete(ctx, id) }) if err != nil { return nil, err @@ -258,6 +263,8 @@ func (m *TaskManager) loadTasks(ctx context.Context) error { return } cleanupAfterDeadShim(context.Background(), id, ns, m.events, binaryCall) + // Remove self from the runtime task list. + m.tasks.Delete(ctx, id) }) if err != nil { cleanupAfterDeadShim(ctx, id, ns, m.events, binaryCall) diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim.go index c652d3af5..7014de9ff 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim.go @@ -79,7 +79,7 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt defer f.Close() if _, err := io.Copy(os.Stderr, f); err != nil { // When using a multi-container shim the 2nd to Nth container in the - // shim will not have a seperate log pipe. Ignore the failure log + // shim will not have a separate log pipe. Ignore the failure log // message here when the shim connect times out. if !os.IsNotExist(errors.Cause(err)) { log.G(ctx).WithError(err).Error("copy shim log") @@ -100,6 +100,8 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt events: events, rtTasks: rt, } + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() if err := s.Connect(ctx); err != nil { return nil, err } @@ -193,6 +195,11 @@ func (s *shim) ID() string { return s.bundle.ID } +// PID of the task +func (s *shim) PID() uint32 { + return uint32(s.taskPid) +} + func (s *shim) Namespace() string { return s.bundle.Namespace } @@ -214,6 +221,7 @@ func (s *shim) Delete(ctx context.Context) (*runtime.Exit, error) { if err := s.waitShutdown(ctx); err != nil { log.G(ctx).WithError(err).Error("failed to shutdown shim") } + s.Close() if err := s.bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("failed to delete bundle") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 505d734f1..7339eb2a2 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -19,269 +19,39 @@ package shim import ( - "bytes" "context" - "fmt" "io" "net" "os" - "sync" - "unsafe" - winio "github.com/Microsoft/go-winio" - "github.com/containerd/containerd/namespaces" "github.com/containerd/ttrpc" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" ) -// setupSignals creates a new signal handler for all signals func setupSignals(config Config) (chan os.Signal, error) { - signals := make(chan os.Signal, 32) - return signals, nil + return nil, errors.New("not supported") } func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() + return nil, errors.New("not supported") } func subreaper() error { - return nil -} - -type fakeSignal struct { -} - -func (fs *fakeSignal) String() string { - return "" -} - -func (fs *fakeSignal) Signal() { + return errors.New("not supported") } func setupDumpStacks(dump chan<- os.Signal) { - // Windows does not support signals like *nix systems. So instead of - // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be - // signaled. 
ACL'd to builtin administrators and local system - event := "Global\\containerd-shim-runhcs-v1-" + fmt.Sprint(os.Getpid()) - ev, _ := windows.UTF16PtrFromString(event) - sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") - if err != nil { - logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) - return - } - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - h, err := windows.CreateEvent(&sa, 0, 0, ev) - if h == 0 || err != nil { - logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) - return - } - go func() { - logrus.Debugf("Stackdump - waiting signal at %s", event) - for { - windows.WaitForSingleObject(h, windows.INFINITE) - dump <- new(fakeSignal) - } - }() } -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block func serveListener(path string) (net.Listener, error) { - if path == "" { - return nil, errors.New("'socket' must be npipe path") - } - l, err := winio.ListenPipe(path, nil) - if err != nil { - return nil, err - } - logrus.WithField("socket", path).Debug("serving api on npipe socket") - return l, nil + return nil, errors.New("not supported") } func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { - logger.Info("starting signal loop") - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case s := <-signals: - switch s { - case os.Interrupt: - return nil - } - } - } + return errors.New("not supported") } -var _ = (io.WriterTo)(&blockingBuffer{}) -var _ = (io.Writer)(&blockingBuffer{}) - -// blockingBuffer implements the `io.Writer` and `io.WriterTo` interfaces. Once -// `capacity` is reached the calls to `Write` will block until a successful call -// to `WriterTo` frees up the buffer space. -// -// Note: This has the same threadding semantics as bytes.Buffer with no -// additional locking so multithreading is not supported. -type blockingBuffer struct { - c *sync.Cond - - capacity int - - buffer bytes.Buffer -} - -func newBlockingBuffer(capacity int) *blockingBuffer { - return &blockingBuffer{ - c: sync.NewCond(&sync.Mutex{}), - capacity: capacity, - } -} - -func (bb *blockingBuffer) Len() int { - bb.c.L.Lock() - defer bb.c.L.Unlock() - return bb.buffer.Len() -} - -func (bb *blockingBuffer) Write(p []byte) (int, error) { - if len(p) > bb.capacity { - return 0, errors.Errorf("len(p) (%d) too large for capacity (%d)", len(p), bb.capacity) - } - - bb.c.L.Lock() - for bb.buffer.Len()+len(p) > bb.capacity { - bb.c.Wait() - } - defer bb.c.L.Unlock() - return bb.buffer.Write(p) -} - -func (bb *blockingBuffer) WriteTo(w io.Writer) (int64, error) { - bb.c.L.Lock() - defer bb.c.L.Unlock() - defer bb.c.Signal() - return bb.buffer.WriteTo(w) -} - -// deferredShimWriteLogger exists to solve the upstream loggin issue presented -// by using Windows Named Pipes for logging. When containerd restarts it tries -// to reconnect to any shims. This means that the connection to the logger will -// be severed but when containerd starts up it should reconnect and start -// logging again. We abstract all of this logic behind what looks like a simple -// `io.Writer` that can reconnect in the lifetime and buffers logs while -// disconnected. 
-type deferredShimWriteLogger struct { - mu sync.Mutex - - ctx context.Context - - connected bool - aborted bool - - buffer *blockingBuffer - - l net.Listener - c net.Conn - conerr error -} - -// beginAccept issues an accept to wait for a connection. Once a connection -// occurs drains any outstanding buffer. While draining the buffer any writes -// are blocked. If the buffer fails to fully drain due to a connection drop a -// call to `beginAccept` is re-issued waiting for another connection from -// containerd. -func (dswl *deferredShimWriteLogger) beginAccept() { - dswl.mu.Lock() - if dswl.connected { - return - } - dswl.mu.Unlock() - - c, err := dswl.l.Accept() - if err == winio.ErrPipeListenerClosed { - dswl.mu.Lock() - dswl.aborted = true - dswl.l.Close() - dswl.conerr = errors.New("connection closed") - dswl.mu.Unlock() - return - } - dswl.mu.Lock() - dswl.connected = true - dswl.c = c - - // Drain the buffer - if dswl.buffer.Len() > 0 { - _, err := dswl.buffer.WriteTo(dswl.c) - if err != nil { - // We lost our connection draining the buffer. - dswl.connected = false - dswl.c.Close() - go dswl.beginAccept() - } - } - dswl.mu.Unlock() -} - -func (dswl *deferredShimWriteLogger) Write(p []byte) (int, error) { - dswl.mu.Lock() - defer dswl.mu.Unlock() - - if dswl.aborted { - return 0, dswl.conerr - } - - if dswl.connected { - // We have a connection. beginAccept would have drained the buffer so we just write our data to - // the connection directly. - written, err := dswl.c.Write(p) - if err != nil { - // We lost the connection. - dswl.connected = false - dswl.c.Close() - go dswl.beginAccept() - - // We weren't able to write the full `p` bytes. Buffer the rest - if written != len(p) { - w, err := dswl.buffer.Write(p[written:]) - if err != nil { - // We failed to buffer. Return this error - return written + w, err - } - written += w - } - } - - return written, nil - } - - // We are disconnected. Buffer the contents. - return dswl.buffer.Write(p) -} - -// openLog on Windows acts as the server of the log pipe. This allows the -// containerd daemon to independently restart and reconnect to the logs. 
-func openLog(ctx context.Context, id string) (io.Writer, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - - dswl := &deferredShimWriteLogger{ - ctx: ctx, - buffer: newBlockingBuffer(64 * 1024), // 64KB, - } - l, err := winio.ListenPipe(fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-log", ns, id), nil) - if err != nil { - return nil, err - } - dswl.l = l - go dswl.beginAccept() - return dswl, nil +func openLog(ctx context.Context, _ string) (io.Writer, error) { + return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index 6b9ae4eea..700c5b36b 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -23,6 +23,7 @@ import ( "crypto/sha256" "fmt" "net" + "os" "path/filepath" "strings" "syscall" @@ -46,6 +47,21 @@ func SetScore(pid int) error { return sys.SetOOMScore(pid, sys.OOMScoreMaxKillable) } +// AdjustOOMScore sets the OOM score for the process to the parents OOM score +1 +// to ensure that they parent has a lower* score than the shim +func AdjustOOMScore(pid int) error { + parent := os.Getppid() + score, err := sys.GetOOMScoreAdj(parent) + if err != nil { + return errors.Wrap(err, "get parent OOM score") + } + shimScore := score + 1 + if err := sys.SetOOMScore(pid, shimScore); err != nil { + return errors.Wrap(err, "set shim OOM score") + } + return nil +} + // SocketAddress returns an abstract socket address func SocketAddress(ctx context.Context, id string) (string, error) { ns, err := namespaces.NamespaceRequired(ctx) diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index ef26ebe22..1950ea4a8 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,14 +18,12 @@ package shim import ( "context" - "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/containerd/containerd/namespaces" "github.com/pkg/errors" ) @@ -35,58 +33,33 @@ func getSysProcAttr() *syscall.SysProcAttr { return nil } -// SetScore sets the oom score for a process -func SetScore(pid int) error { - return nil -} - -// SocketAddress returns a npipe address -func SocketAddress(ctx context.Context, id string) (string, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return "", err - } - return fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-pipe", ns, id), nil -} - // AnonDialer returns a dialer for a npipe func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - var c net.Conn - var lastError error - timedOutError := errors.Errorf("timed out waiting for npipe %s", address) - start := time.Now() - for { - remaining := timeout - time.Since(start) - if remaining <= 0 { - lastError = timedOutError - break - } - c, lastError = winio.DialPipe(address, &remaining) - if lastError == nil { - break - } - if !os.IsNotExist(lastError) { - break - } - // There is nobody serving the pipe. We limit the timeout for this case - // to 5 seconds because any shim that would serve this endpoint should - // serve it within 5 seconds. 
We use the passed in timeout for the - // `DialPipe` timeout if the pipe exists however to give the pipe time - // to `Accept` the connection. - if time.Since(start) >= 5*time.Second { - lastError = timedOutError - break - } - time.Sleep(10 * time.Millisecond) - } - return c, lastError -} + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() -// NewSocket returns a new npipe listener -func NewSocket(address string) (net.Listener, error) { - l, err := winio.ListenPipe(address, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to listen to npipe %s", address) + // If there is nobody serving the pipe we limit the timeout for this case to + // 5 seconds because any shim that would serve this endpoint should serve it + // within 5 seconds. + serveTimer := time.NewTimer(5 * time.Second) + defer serveTimer.Stop() + for { + c, err := winio.DialPipeContext(ctx, address) + if err != nil { + if os.IsNotExist(err) { + select { + case <-serveTimer.C: + return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + default: + // Wait 10ms for the shim to serve and try again. + time.Sleep(10 * time.Millisecond) + continue + } + } else if err == context.DeadlineExceeded { + return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + } + return nil, err + } + return c, nil } - return l, nil } diff --git a/vendor/github.com/containerd/containerd/services/diff/local.go b/vendor/github.com/containerd/containerd/services/diff/local.go index f60e1c658..f05b222db 100644 --- a/vendor/github.com/containerd/containerd/services/diff/local.go +++ b/vendor/github.com/containerd/containerd/services/diff/local.go @@ -100,6 +100,9 @@ func (l *local) Apply(ctx context.Context, er *diffapi.ApplyRequest, _ ...grpc.C ) var opts []diff.ApplyOpt + if er.Payloads != nil { + opts = append(opts, diff.WithPayloads(er.Payloads)) + } for _, differ := range l.differs { ocidesc, err = differ.Apply(ctx, desc, mounts, opts...) diff --git a/vendor/github.com/containerd/containerd/services/introspection/service.go b/vendor/github.com/containerd/containerd/services/introspection/service.go index bc5913bd7..404ffe481 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/service.go +++ b/vendor/github.com/containerd/containerd/services/introspection/service.go @@ -18,6 +18,10 @@ package introspection import ( context "context" + "io/ioutil" + "os" + "path/filepath" + "sync" api "github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/api/types" @@ -26,6 +30,7 @@ import ( "github.com/containerd/containerd/plugin" "github.com/gogo/googleapis/google/rpc" ptypes "github.com/gogo/protobuf/types" + "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/status" ) @@ -40,19 +45,22 @@ func init() { // this service is initialized. Since we require this service last, // it should provide the full set of plugins. 
pluginsPB := pluginsToPB(ic.GetAll()) - return NewService(pluginsPB), nil + return NewService(pluginsPB, ic.Root), nil }, }) } type service struct { + mu sync.Mutex plugins []api.Plugin + root string } // NewService returns the GRPC introspection server -func NewService(plugins []api.Plugin) api.IntrospectionServer { +func NewService(plugins []api.Plugin, root string) api.IntrospectionServer { return &service{ plugins: plugins, + root: root, } } @@ -81,6 +89,54 @@ func (s *service) Plugins(ctx context.Context, req *api.PluginsRequest) (*api.Pl }, nil } +func (s *service) Server(ctx context.Context, _ *ptypes.Empty) (*api.ServerResponse, error) { + u, err := s.getUUID() + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.ServerResponse{ + UUID: u, + }, nil +} + +func (s *service) getUUID() (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + data, err := ioutil.ReadFile(s.uuidPath()) + if err != nil { + if os.IsNotExist(err) { + return s.generateUUID() + } + return "", err + } + u := string(data) + if _, err := uuid.Parse(u); err != nil { + return "", err + } + return u, nil +} + +func (s *service) generateUUID() (string, error) { + u, err := uuid.NewRandom() + if err != nil { + return "", err + } + path := s.uuidPath() + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return "", err + } + uu := u.String() + if err := ioutil.WriteFile(path, []byte(uu), 0666); err != nil { + return "", err + } + return uu, nil +} + +func (s *service) uuidPath() string { + return filepath.Join(s.root, "uuid") +} + func adaptPlugin(o interface{}) filters.Adaptor { obj := o.(api.Plugin) return filters.AdapterFunc(func(fieldpath []string) (string, bool) { diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go index 13ccd5b6f..365dfa0fd 100644 --- a/vendor/github.com/containerd/containerd/services/server/config/config.go +++ b/vendor/github.com/containerd/containerd/services/server/config/config.go @@ -56,9 +56,25 @@ type Config struct { // ProxyPlugins configures plugins which are communicated to over GRPC ProxyPlugins map[string]ProxyPlugin `toml:"proxy_plugins"` + StreamProcessors []StreamProcessor `toml:"stream_processors"` + md toml.MetaData } +// StreamProcessor provides configuration for diff content processors +type StreamProcessor struct { + // ID of the processor, also used to fetch the specific payload + ID string `toml:"id"` + // Accepts specific media-types + Accepts []string `toml:"accepts"` + // Returns the media-type + Returns string `toml:"returns"` + // Path or name of the binary + Path string `toml:"path"` + // Args to the binary + Args []string `toml:"args"` +} + // GetVersion returns the config file's version func (c *Config) GetVersion() int { if c.Version == 0 { diff --git a/vendor/github.com/containerd/containerd/services/server/server.go b/vendor/github.com/containerd/containerd/services/server/server.go index 3c85b4b7c..0e6923918 100644 --- a/vendor/github.com/containerd/containerd/services/server/server.go +++ b/vendor/github.com/containerd/containerd/services/server/server.go @@ -35,6 +35,7 @@ import ( "github.com/containerd/containerd/content/local" csproxy "github.com/containerd/containerd/content/proxy" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" @@ -80,6 +81,10 @@ 
func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { if err != nil { return nil, err } + for _, p := range config.StreamProcessors { + diff.RegisterProcessor(diff.BinaryHandler(p.ID, p.Returns, p.Accepts, p.Path, p.Args)) + } + serverOpts := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), diff --git a/vendor/github.com/containerd/containerd/services/tasks/local.go b/vendor/github.com/containerd/containerd/services/tasks/local.go index 78e384f87..2f17b158c 100644 --- a/vendor/github.com/containerd/containerd/services/tasks/local.go +++ b/vendor/github.com/containerd/containerd/services/tasks/local.go @@ -43,7 +43,7 @@ import ( "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/containerd/containerd/runtime/v2" + v2 "github.com/containerd/containerd/runtime/v2" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/services" "github.com/containerd/typeurl" @@ -182,24 +182,23 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc. if err != nil { return nil, err } - if _, err := rtime.Get(ctx, r.ContainerID); err != runtime.ErrTaskNotExists { + _, err = rtime.Get(ctx, r.ContainerID) + if err != nil && err != runtime.ErrTaskNotExists { + return nil, errdefs.ToGRPC(err) + } + if err == nil { return nil, errdefs.ToGRPC(fmt.Errorf("task %s already exists", r.ContainerID)) } c, err := rtime.Create(ctx, r.ContainerID, opts) if err != nil { return nil, errdefs.ToGRPC(err) } - // TODO: fast path for getting pid on create if err := l.monitor.Monitor(c); err != nil { return nil, errors.Wrap(err, "monitor task") } - state, err := c.State(ctx) - if err != nil { - log.G(ctx).Error(err) - } return &api.CreateTaskResponse{ ContainerID: r.ContainerID, - Pid: state.Pid, + Pid: c.PID(), }, nil } @@ -266,12 +265,18 @@ func (l *local) DeleteProcess(ctx context.Context, r *api.DeleteProcessRequest, }, nil } -func processFromContainerd(ctx context.Context, p runtime.Process) (*task.Process, error) { +func getProcessState(ctx context.Context, p runtime.Process) (*task.Process, error) { + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + state, err := p.State(ctx) if err != nil { - return nil, err + if errdefs.IsNotFound(err) { + return nil, err + } + log.G(ctx).WithError(err).Errorf("get state for %s", p.ID()) } - var status task.Status + status := task.StatusUnknown switch state.Status { case runtime.CreatedStatus: status = task.StatusCreated @@ -310,7 +315,7 @@ func (l *local) Get(ctx context.Context, r *api.GetRequest, _ ...grpc.CallOption return nil, errdefs.ToGRPC(err) } } - t, err := processFromContainerd(ctx, p) + t, err := getProcessState(ctx, p) if err != nil { return nil, errdefs.ToGRPC(err) } @@ -333,7 +338,7 @@ func (l *local) List(ctx context.Context, r *api.ListTasksRequest, _ ...grpc.Cal func addTasks(ctx context.Context, r *api.ListTasksResponse, tasks []runtime.Task) { for _, t := range tasks { - tt, err := processFromContainerd(ctx, t) + tt, err := getProcessState(ctx, t) if err != nil { if !errdefs.IsNotFound(err) { // handle race with deletion log.G(ctx).WithError(err).WithField("id", t.ID()).Error("converting task to protobuf") diff --git a/vendor/github.com/containerd/containerd/services/tasks/local_unix.go b/vendor/github.com/containerd/containerd/services/tasks/local_unix.go 
index 9469d8dce..3818e9282 100644 --- a/vendor/github.com/containerd/containerd/services/tasks/local_unix.go +++ b/vendor/github.com/containerd/containerd/services/tasks/local_unix.go @@ -1,3 +1,5 @@ +// +build !windows + /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/services/tasks/local_windows.go b/vendor/github.com/containerd/containerd/services/tasks/local_windows.go new file mode 100644 index 000000000..d12f9e470 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/tasks/local_windows.go @@ -0,0 +1,35 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tasks + +import ( + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" +) + +var tasksServiceRequires = []plugin.Type{ + plugin.RuntimePluginV2, + plugin.MetadataPlugin, + plugin.TaskMonitorPlugin, +} + +// loadV1Runtimes on Windows V2 returns an empty map. There are no v1 runtimes +func loadV1Runtimes(ic *plugin.InitContext) (map[string]runtime.PlatformRuntime, error) { + return make(map[string]runtime.PlatformRuntime), nil +} diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index b4af6a308..514538f7e 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -86,10 +86,15 @@ func (k *Kind) UnmarshalJSON(b []byte) error { // Info provides information about a particular snapshot. // JSON marshallability is supported for interactive with tools like ctr, type Info struct { - Kind Kind // active or committed snapshot - Name string // name or key of snapshot - Parent string `json:",omitempty"` // name of parent snapshot - Labels map[string]string `json:",omitempty"` // Labels for snapshot + Kind Kind // active or committed snapshot + Name string // name or key of snapshot + Parent string `json:",omitempty"` // name of parent snapshot + + // Labels for a snapshot. + // + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the + // snapshotter's `Prepare`, `View`, or `Commit` calls. 
+ Labels map[string]string `json:",omitempty"` Created time.Time `json:",omitempty"` // Created time Updated time.Time `json:",omitempty"` // Last update time } diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 8bf450510..edb2f4ea3 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,6 +1,6 @@ -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1 github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f -github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877 @@ -20,7 +20,7 @@ github.com/gogo/protobuf v1.2.1 github.com/gogo/googleapis v1.2.0 github.com/golang/protobuf v1.2.0 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db -github.com/opencontainers/runc v1.0.0-rc8 +github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86 github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c @@ -33,19 +33,19 @@ github.com/opencontainers/image-spec v1.0.1 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e github.com/BurntSushi/toml v0.3.1 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 -github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac +github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/containerd/ttrpc a5bd8ce9e40bc7c065a11c6936f4d032ce6bfa2b +github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 go.etcd.io/bbolt 2eb7227adea1d5cf85f0bc2a82b7059b13c2fa68 # cri dependencies -github.com/containerd/cri 2fc62db8146ce66f27b37306ad5fda34207835f3 # master -github.com/containerd/go-cni 891c2a41e18144b2d7921f971d6c9789a68046b2 +github.com/containerd/cri b213648c5bd0a1d2ee42709c10dff63fbfee3ad7 # master +github.com/containerd/go-cni 22460c018b64cf8bf4151b3ff9c4d077e6a88cbf github.com/containernetworking/cni v0.6.0 github.com/containernetworking/plugins v0.7.0 github.com/davecgh/go-spew v1.1.0 @@ -60,19 +60,20 @@ github.com/json-iterator/go 1.1.5 github.com/modern-go/reflect2 1.0.1 github.com/modern-go/concurrent 1.0.3 github.com/opencontainers/selinux v1.2.2 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +github.com/seccomp/libseccomp-golang v0.9.1 github.com/tchap/go-patricia v2.2.6 golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3 golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 gopkg.in/yaml.v2 v2.2.1 -k8s.io/api kubernetes-1.15.0-alpha.0 -k8s.io/apimachinery kubernetes-1.15.0-alpha.0 -k8s.io/apiserver 
kubernetes-1.15.0-alpha.0 -k8s.io/client-go kubernetes-1.15.0-alpha.0 -k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f -k8s.io/kubernetes v1.15.0-alpha.0 +k8s.io/api kubernetes-1.15.0 +k8s.io/apimachinery kubernetes-1.15.0 +k8s.io/apiserver kubernetes-1.15.0 +k8s.io/cri-api kubernetes-1.15.0 +k8s.io/client-go kubernetes-1.15.0 +k8s.io/klog v0.3.1 +k8s.io/kubernetes v1.15.0 k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c sigs.k8s.io/yaml v1.1.0 @@ -83,3 +84,8 @@ github.com/google/uuid v1.1.1 # aufs dependencies github.com/containerd/aufs f894a800659b6e11c1a13084abd1712f346e349c + +# image encryption dependencies +gopkg.in/square/go-jose.v2 8254d6c783765f38c8675fae4427a1fe73fbd09d https://github.com/square/go-jose.git +github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4 +github.com/miscreant/miscreant-go 325cbd69228b6af571a635f7502586a920a2749a https://github.com/miscreant/miscreant.go diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md index 239601f1e..c899bdd7e 100644 --- a/vendor/github.com/containerd/go-runc/README.md +++ b/vendor/github.com/containerd/go-runc/README.md @@ -1,7 +1,7 @@ # go-runc [![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc) - +[![codecov](https://codecov.io/gh/containerd/go-runc/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/go-runc) This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications. It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, its opensource! @@ -12,3 +12,14 @@ or greater. ## Docs Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc). + +## Project details + +The go-runc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
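// Illustrative sketch (not from the patch): consuming the go-runc package described in the
// README above, consistent with the updated Run signature in the runc.go hunk below. The
// runc binary name, container id, and bundle path here are assumptions for the example.
package main

import (
	"context"
	"log"

	runc "github.com/containerd/go-runc"
)

func main() {
	r := &runc.Runc{Command: "runc"}
	ctx := context.Background()

	// Run creates and starts the container, then waits for it to exit. With this
	// update a non-zero exit status is also surfaced as an error instead of only
	// being returned as the status code.
	status, err := r.Run(ctx, "example", "/path/to/bundle", &runc.CreateOpts{})
	if err != nil {
		log.Fatalf("run failed (exit status %d): %v", status, err)
	}
	log.Printf("container exited with status %d", status)
}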
diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go index 96262afab..613cc511c 100644 --- a/vendor/github.com/containerd/go-runc/runc.go +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -275,7 +275,11 @@ func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) if err != nil { return -1, err } - return Monitor.Wait(cmd, ec) + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + return status, err } type DeleteOpts struct { @@ -570,7 +574,11 @@ func (r *Runc) Restore(context context.Context, id, bundle string, opts *Restore } } } - return Monitor.Wait(cmd, ec) + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + return status, err } // Update updates the current container with the provided resource spec diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go index 22f5496b4..aa8c9541c 100644 --- a/vendor/github.com/containerd/ttrpc/channel.go +++ b/vendor/github.com/containerd/ttrpc/channel.go @@ -18,7 +18,6 @@ package ttrpc import ( "bufio" - "context" "encoding/binary" "io" "net" @@ -98,7 +97,7 @@ func newChannel(conn net.Conn) *channel { // returned will be valid and caller should send that along to // the correct consumer. The bytes on the underlying channel // will be discarded. -func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) { +func (ch *channel) recv() (messageHeader, []byte, error) { mh, err := readMessageHeader(ch.hrbuf[:], ch.br) if err != nil { return messageHeader{}, nil, err @@ -120,7 +119,7 @@ func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) { return mh, p, nil } -func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error { +func (ch *channel) send(streamID uint32, t messageType, p []byte) error { if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { return err } diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index 748a0073a..9db15fe69 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -36,36 +36,52 @@ import ( // closed. 
var ErrClosed = errors.New("ttrpc: closed") +// Client for a ttrpc server type Client struct { codec codec conn net.Conn channel *channel calls chan *callRequest - closed chan struct{} - closeOnce sync.Once - closeFunc func() - done chan struct{} - err error + ctx context.Context + closed func() + + closeOnce sync.Once + userCloseFunc func() + + errOnce sync.Once + err error + interceptor UnaryClientInterceptor } +// ClientOpts configures a client type ClientOpts func(c *Client) +// WithOnClose sets the close func whenever the client's Close() method is called func WithOnClose(onClose func()) ClientOpts { return func(c *Client) { - c.closeFunc = onClose + c.userCloseFunc = onClose + } +} + +// WithUnaryClientInterceptor sets the provided client interceptor +func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { + return func(c *Client) { + c.interceptor = i } } func NewClient(conn net.Conn, opts ...ClientOpts) *Client { + ctx, cancel := context.WithCancel(context.Background()) c := &Client{ - codec: codec{}, - conn: conn, - channel: newChannel(conn), - calls: make(chan *callRequest), - closed: make(chan struct{}), - done: make(chan struct{}), - closeFunc: func() {}, + codec: codec{}, + conn: conn, + channel: newChannel(conn), + calls: make(chan *callRequest), + closed: cancel, + ctx: ctx, + userCloseFunc: func() {}, + interceptor: defaultClientInterceptor, } for _, o := range opts { @@ -100,14 +116,17 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int ) if metadata, ok := GetMetadata(ctx); ok { - creq.Metadata = metadata + metadata.setRequest(creq) } if dl, ok := ctx.Deadline(); ok { creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() } - if err := c.dispatch(ctx, creq, cresp); err != nil { + info := &UnaryClientInfo{ + FullMethod: fullPath(service, method), + } + if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { return err } @@ -135,8 +154,8 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err case <-ctx.Done(): return ctx.Err() case c.calls <- call: - case <-c.done: - return c.err + case <-c.ctx.Done(): + return c.error() } select { @@ -144,16 +163,15 @@ func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) err return ctx.Err() case err := <-errs: return filterCloseErr(err) - case <-c.done: - return c.err + case <-c.ctx.Done(): + return c.error() } } func (c *Client) Close() error { c.closeOnce.Do(func() { - close(c.closed) + c.closed() }) - return nil } @@ -163,51 +181,82 @@ type message struct { err error } -func (c *Client) run() { - var ( - streamID uint32 = 1 - waiters = make(map[uint32]*callRequest) - calls = c.calls - incoming = make(chan *message) - shutdown = make(chan struct{}) - shutdownErr error - ) +type receiver struct { + wg *sync.WaitGroup + messages chan *message + err error +} - go func() { - defer close(shutdown) +func (r *receiver) run(ctx context.Context, c *channel) { + defer r.wg.Done() - // start one more goroutine to recv messages without blocking. - for { - mh, p, err := c.channel.recv(context.TODO()) + for { + select { + case <-ctx.Done(): + r.err = ctx.Err() + return + default: + mh, p, err := c.recv() if err != nil { _, ok := status.FromError(err) if !ok { // treat all errors that are not an rpc status as terminal. // all others poison the connection. 
- shutdownErr = err + r.err = filterCloseErr(err) return } } select { - case incoming <- &message{ + case r.messages <- &message{ messageHeader: mh, p: p[:mh.Length], err: err, }: - case <-c.done: + case <-ctx.Done(): + r.err = ctx.Err() return } } - }() + } +} - defer c.conn.Close() - defer close(c.done) - defer c.closeFunc() +func (c *Client) run() { + var ( + streamID uint32 = 1 + waiters = make(map[uint32]*callRequest) + calls = c.calls + incoming = make(chan *message) + receiversDone = make(chan struct{}) + wg sync.WaitGroup + ) + + // broadcast the shutdown error to the remaining waiters. + abortWaiters := func(wErr error) { + for _, waiter := range waiters { + waiter.errs <- wErr + } + } + recv := &receiver{ + wg: &wg, + messages: incoming, + } + wg.Add(1) + + go func() { + wg.Wait() + close(receiversDone) + }() + go recv.run(c.ctx, c.channel) + + defer func() { + c.conn.Close() + c.userCloseFunc() + }() for { select { case call := <-calls: - if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil { + if err := c.send(streamID, messageTypeRequest, call.req); err != nil { call.errs <- err continue } @@ -223,41 +272,42 @@ func (c *Client) run() { call.errs <- c.recv(call.resp, msg) delete(waiters, msg.StreamID) - case <-shutdown: - if shutdownErr != nil { - shutdownErr = filterCloseErr(shutdownErr) - } else { - shutdownErr = ErrClosed - } - - shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down") - - c.err = shutdownErr - for _, waiter := range waiters { - waiter.errs <- shutdownErr + case <-receiversDone: + // all the receivers have exited + if recv.err != nil { + c.setError(recv.err) } + // don't return out, let the close of the context trigger the abort of waiters c.Close() - return - case <-c.closed: - if c.err == nil { - c.err = ErrClosed - } - // broadcast the shutdown error to the remaining waiters. - for _, waiter := range waiters { - waiter.errs <- c.err - } + case <-c.ctx.Done(): + abortWaiters(c.error()) return } } } -func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error { +func (c *Client) error() error { + c.errOnce.Do(func() { + if c.err == nil { + c.err = ErrClosed + } + }) + return c.err +} + +func (c *Client) setError(err error) { + c.errOnce.Do(func() { + c.err = err + }) +} + +func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { p, err := c.codec.Marshal(msg) if err != nil { return err } - return c.channel.send(ctx, streamID, mtype, p) + return c.channel.send(streamID, mtype, p) } func (c *Client) recv(resp *Response, msg *message) error { @@ -278,22 +328,21 @@ func (c *Client) recv(resp *Response, msg *message) error { // // This purposely ignores errors with a wrapped cause. 
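With the dedicated receiver goroutine and the once-guarded client error above, pending and future calls all fail with a single terminal error once the connection dies, and filterCloseErr below maps EOF, EPIPE and closed-connection failures onto ErrClosed. A hedged sketch of how a caller can key off that value (the service, method and wrapper are made up for illustration):

package main

import (
	"context"
	"log"

	"github.com/containerd/ttrpc"
)

// callOrReconnect treats ErrClosed as the signal that the connection is gone
// and a fresh client is needed; any other error is returned as-is.
func callOrReconnect(ctx context.Context, c *ttrpc.Client, req, resp interface{}) error {
	err := c.Call(ctx, "example.v1.Service", "Get", req, resp)
	if err == ttrpc.ErrClosed {
		log.Print("ttrpc connection closed; caller should rebuild the client")
	}
	return err
}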
func filterCloseErr(err error) error { - if err == nil { + switch { + case err == nil: return nil - } - - if err == io.EOF { + case err == io.EOF: return ErrClosed - } - - if strings.Contains(err.Error(), "use of closed network connection") { + case errors.Cause(err) == io.EOF: return ErrClosed - } - - // if we have an epipe on a write, we cast to errclosed - if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" { - if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE { - return ErrClosed + case strings.Contains(err.Error(), "use of closed network connection"): + return ErrClosed + default: + // if we have an epipe on a write, we cast to errclosed + if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" { + if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE { + return ErrClosed + } } } diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go index 019b7a09d..6a53c112b 100644 --- a/vendor/github.com/containerd/ttrpc/config.go +++ b/vendor/github.com/containerd/ttrpc/config.go @@ -19,9 +19,11 @@ package ttrpc import "github.com/pkg/errors" type serverConfig struct { - handshaker Handshaker + handshaker Handshaker + interceptor UnaryServerInterceptor } +// ServerOpt for configuring a ttrpc server type ServerOpt func(*serverConfig) error // WithServerHandshaker can be passed to NewServer to ensure that the @@ -37,3 +39,14 @@ func WithServerHandshaker(handshaker Handshaker) ServerOpt { return nil } } + +// WithUnaryServerInterceptor sets the provided interceptor on the server +func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { + return func(c *serverConfig) error { + if c.interceptor != nil { + return errors.New("only one interceptor allowed per server") + } + c.interceptor = i + return nil + } +} diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go new file mode 100644 index 000000000..c1219dac6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/interceptor.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import "context" + +// UnaryServerInfo provides information about the server request +type UnaryServerInfo struct { + FullMethod string +} + +// UnaryClientInfo provides information about the client request +type UnaryClientInfo struct { + FullMethod string +} + +// Unmarshaler contains the server request data and allows it to be unmarshaled +// into a concrete type +type Unmarshaler func(interface{}) error + +// Invoker invokes the client's request and response from the ttrpc server +type Invoker func(context.Context, *Request, *Response) error + +// UnaryServerInterceptor specifies the interceptor function for server request/response +type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) + +// UnaryClientInterceptor specifies the interceptor function for client request/response +type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error + +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { + return method(ctx, unmarshal) +} + +func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { + return invoker(ctx, req, resp) +} diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go index d03626321..ce8c0d13c 100644 --- a/vendor/github.com/containerd/ttrpc/metadata.go +++ b/vendor/github.com/containerd/ttrpc/metadata.go @@ -16,53 +16,74 @@ package ttrpc -import "context" +import ( + "context" + "strings" +) -// Metadata represents the key-value pairs (similar to http.Header) to be passed to ttrpc server from a client. -type Metadata map[string]StringList +// MD is the user type for ttrpc metadata +type MD map[string][]string // Get returns the metadata for a given key when they exist. // If there is no metadata, a nil slice and false are returned. -func (m Metadata) Get(key string) ([]string, bool) { +func (m MD) Get(key string) ([]string, bool) { + key = strings.ToLower(key) list, ok := m[key] - if !ok || len(list.List) == 0 { + if !ok || len(list) == 0 { return nil, false } - return list.List, true + return list, true } // Set sets the provided values for a given key. // The values will overwrite any existing values. // If no values provided, a key will be deleted. -func (m Metadata) Set(key string, values ...string) { +func (m MD) Set(key string, values ...string) { + key = strings.ToLower(key) if len(values) == 0 { delete(m, key) return } - - m[key] = StringList{List: values} + m[key] = values } // Append appends additional values to the given key. -func (m Metadata) Append(key string, values ...string) { +func (m MD) Append(key string, values ...string) { + key = strings.ToLower(key) if len(values) == 0 { return } - - list, ok := m[key] + current, ok := m[key] if ok { - m.Set(key, append(list.List, values...)...) + m.Set(key, append(current, values...)...) } else { m.Set(key, values...) 
} } +func (m MD) setRequest(r *Request) { + for k, values := range m { + for _, v := range values { + r.Metadata = append(r.Metadata, &KeyValue{ + Key: k, + Value: v, + }) + } + } +} + +func (m MD) fromRequest(r *Request) { + for _, kv := range r.Metadata { + m[kv.Key] = append(m[kv.Key], kv.Value) + } +} + type metadataKey struct{} // GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) -func GetMetadata(ctx context.Context) (Metadata, bool) { - metadata, ok := ctx.Value(metadataKey{}).(Metadata) +func GetMetadata(ctx context.Context) (MD, bool) { + metadata, ok := ctx.Value(metadataKey{}).(MD) return metadata, ok } @@ -81,6 +102,6 @@ func GetMetadataValue(ctx context.Context, name string) (string, bool) { } // WithMetadata attaches metadata map to a context.Context -func WithMetadata(ctx context.Context, headers Metadata) context.Context { - return context.WithValue(ctx, metadataKey{}, headers) +func WithMetadata(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, metadataKey{}, md) } diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index 595a69a0d..1d4f1df65 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -53,10 +53,13 @@ func NewServer(opts ...ServerOpt) (*Server, error) { return nil, err } } + if config.interceptor == nil { + config.interceptor = defaultServerInterceptor + } return &Server{ config: config, - services: newServiceSet(), + services: newServiceSet(config.interceptor), done: make(chan struct{}), listeners: make(map[net.Listener]struct{}), connections: make(map[*serverConn]struct{}), @@ -341,7 +344,7 @@ func (c *serverConn) run(sctx context.Context) { default: // proceed } - mh, p, err := ch.recv(ctx) + mh, p, err := ch.recv() if err != nil { status, ok := status.FromError(err) if !ok { @@ -438,7 +441,7 @@ func (c *serverConn) run(sctx context.Context) { return } - if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil { + if err := ch.send(response.id, messageTypeResponse, p); err != nil { logrus.WithError(err).Error("failed sending message on channel") return } @@ -466,8 +469,10 @@ func (c *serverConn) run(sctx context.Context) { var noopFunc = func() {} func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { - if req.Metadata != nil { - ctx = WithMetadata(ctx, req.Metadata) + if len(req.Metadata) > 0 { + md := MD{} + md.fromRequest(req) + ctx = WithMetadata(ctx, md) } cancel = noopFunc diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go index fe1cade5a..655b2caea 100644 --- a/vendor/github.com/containerd/ttrpc/services.go +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -37,12 +37,14 @@ type ServiceDesc struct { } type serviceSet struct { - services map[string]ServiceDesc + services map[string]ServiceDesc + interceptor UnaryServerInterceptor } -func newServiceSet() *serviceSet { +func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { return &serviceSet{ - services: make(map[string]ServiceDesc), + services: make(map[string]ServiceDesc), + interceptor: interceptor, } } @@ -84,7 +86,11 @@ func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName strin return nil } - resp, err := method(ctx, unmarshal) + info := &UnaryServerInfo{ + FullMethod: fullPath(serviceName, methodName), + } + + resp, err := s.interceptor(ctx, unmarshal, info, 
method) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go index c8ecb3868..9a1c19a72 100644 --- a/vendor/github.com/containerd/ttrpc/types.go +++ b/vendor/github.com/containerd/ttrpc/types.go @@ -23,11 +23,11 @@ import ( ) type Request struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3"` - Method string `protobuf:"bytes,2,opt,name=method,proto3"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` - Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Service string `protobuf:"bytes,1,opt,name=service,proto3"` + Method string `protobuf:"bytes,2,opt,name=method,proto3"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` } func (r *Request) Reset() { *r = Request{} } @@ -52,3 +52,12 @@ func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } func (r *StringList) ProtoMessage() {} func makeStringList(item ...string) StringList { return StringList{List: item} } + +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3"` + Value string `protobuf:"bytes,2,opt,name=value,proto3"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (*KeyValue) ProtoMessage() {} +func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
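The ttrpc changes above add unary interceptors on both the client and the server and replace the old Metadata message with a plain MD map (map[string][]string) carried as repeated KeyValue pairs on the Request. A hedged client-side sketch of the new surface (the socket path and header name are assumptions); the server mirrors it with ttrpc.NewServer(ttrpc.WithUnaryServerInterceptor(...)):

package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

// loggingInterceptor logs every outgoing call before handing it to the real
// invoker, using the FullMethod carried in UnaryClientInfo.
func loggingInterceptor(ctx context.Context, req *ttrpc.Request, resp *ttrpc.Response, info *ttrpc.UnaryClientInfo, invoker ttrpc.Invoker) error {
	log.Printf("ttrpc call %s", info.FullMethod)
	return invoker(ctx, req, resp)
}

func main() {
	conn, err := net.Dial("unix", "/run/example/ttrpc.sock") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	client := ttrpc.NewClient(conn,
		ttrpc.WithUnaryClientInterceptor(loggingInterceptor),
		ttrpc.WithOnClose(func() { log.Println("ttrpc client closed") }),
	)
	defer client.Close()

	// Metadata keys are lowercased by Set/Append and attached to each Request
	// as KeyValue pairs; a generated service client would pass this ctx along.
	md := ttrpc.MD{}
	md.Set("tracing-id", "abc123")
	_ = ttrpc.WithMetadata(context.Background(), md)
}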
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..9d92c11f1 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 000000000..fc84cd79d --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b17461631 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..7f9e0c6c0 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
+func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..f326b54db --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. 
Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. 
+func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..524404cc5 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. 
+ Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..199a1ac65 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. 
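As the comment above says, New (random, version 4) is the usual entry point; NewUUID below and the name-based constructors cover the version 1 and version 5 cases. A short usage sketch of the vendored package (values are arbitrary):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Random (version 4) UUID; New panics only if crypto/rand fails.
	id := uuid.New()
	fmt.Println(id, id.Version(), id.Variant())

	// Round-trip through the canonical textual form; UUID is a [16]byte
	// array, so parsed values compare directly.
	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == id) // true

	// Name-based (version 5) UUID in the DNS namespace.
	fmt.Println(uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.org")))

	// Time-based (version 1) UUID, built from the node ID and clock sequence.
	if v1, err := uuid.NewUUID(); err == nil {
		fmt.Println(v1.Version())
	}
}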
+func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..84af91c9f --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c index 7750af35e..3b08c5e33 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c @@ -37,9 +37,6 @@ enum sync_t { SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */ SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */ SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */ - - /* XXX: This doesn't help with segfaults and other such issues. */ - SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */ }; /* @@ -95,6 +92,15 @@ struct nlconfig_t { size_t gidmappath_len; }; +#define PANIC "panic" +#define FATAL "fatal" +#define ERROR "error" +#define WARNING "warning" +#define INFO "info" +#define DEBUG "debug" + +static int logfd = -1; + /* * List of netlink message types sent to us as part of bootstrapping the init. * These constants are defined in libcontainer/message_linux.go. @@ -131,22 +137,34 @@ int setns(int fd, int nstype) } #endif +static void write_log_with_info(const char *level, const char *function, int line, const char *format, ...) 
+{ + char message[1024] = {}; + + va_list args; + + if (logfd < 0 || level == NULL) + return; + + va_start(args, format); + if (vsnprintf(message, sizeof(message), format, args) < 0) + return; + va_end(args); + + if (dprintf(logfd, "{\"level\":\"%s\", \"msg\": \"%s:%d %s\"}\n", level, function, line, message) < 0) + return; +} + +#define write_log(level, fmt, ...) \ + write_log_with_info((level), __FUNCTION__, __LINE__, (fmt), ##__VA_ARGS__) + /* XXX: This is ugly. */ static int syncfd = -1; -/* TODO(cyphar): Fix this so it correctly deals with syncT. */ -#define bail(fmt, ...) \ - do { \ - int ret = __COUNTER__ + 1; \ - fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \ - if (syncfd >= 0) { \ - enum sync_t s = SYNC_ERR; \ - if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \ - fprintf(stderr, "nsenter: failed: write(s)"); \ - if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \ - fprintf(stderr, "nsenter: failed: write(ret)"); \ - } \ - exit(ret); \ +#define bail(fmt, ...) \ + do { \ + write_log(FATAL, "nsenter: " fmt ": %m", ##__VA_ARGS__); \ + exit(1); \ } while(0) static int write_file(char *data, size_t data_len, char *pathfmt, ...) @@ -352,6 +370,23 @@ static int initpipe(void) return pipenum; } +static void setup_logpipe(void) +{ + char *logpipe, *endptr; + + logpipe = getenv("_LIBCONTAINER_LOGPIPE"); + if (logpipe == NULL || *logpipe == '\0') { + return; + } + + logfd = strtol(logpipe, &endptr, 10); + if (logpipe == endptr || *endptr != '\0') { + fprintf(stderr, "unable to parse _LIBCONTAINER_LOGPIPE, value: %s\n", logpipe); + /* It is too early to use bail */ + exit(1); + } +} + /* Returns the clone(2) flag for a namespace, given the name of a namespace. */ static int nsflag(char *name) { @@ -544,6 +579,12 @@ void nsexec(void) int sync_child_pipe[2], sync_grandchild_pipe[2]; struct nlconfig_t config = { 0 }; + /* + * Setup a pipe to send logs to the parent. This should happen + * first, because bail will use that pipe. + */ + setup_logpipe(); + /* * If we don't have an init pipe, just return to the go routine. * We'll only get an init pipe for start or exec. @@ -560,6 +601,8 @@ void nsexec(void) if (ensure_cloned_binary() < 0) bail("could not ensure we are a cloned binary"); + write_log(DEBUG, "nsexec started"); + /* Parse all of the netlink configuration. */ nl_parse(pipenum, &config); @@ -676,7 +719,6 @@ void nsexec(void) */ while (!ready) { enum sync_t s; - int ret; syncfd = sync_child_pipe[1]; close(sync_child_pipe[0]); @@ -685,12 +727,6 @@ void nsexec(void) bail("failed to sync with child: next state"); switch (s) { - case SYNC_ERR: - /* We have to mirror the error code of the child. */ - if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) - bail("failed to sync with child: read(error code)"); - - exit(ret); case SYNC_USERMAP_PLS: /* * Enable setgroups(2) if we've been asked to. But we also @@ -759,7 +795,6 @@ void nsexec(void) ready = false; while (!ready) { enum sync_t s; - int ret; syncfd = sync_grandchild_pipe[1]; close(sync_grandchild_pipe[0]); @@ -774,12 +809,6 @@ void nsexec(void) bail("failed to sync with child: next state"); switch (s) { - case SYNC_ERR: - /* We have to mirror the error code of the child. 
*/ - if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) - bail("failed to sync with child: read(error code)"); - - exit(ret); case SYNC_CHILD_READY: ready = true; break; diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf index 22cba0f1b..b0bfb1ef8 100644 --- a/vendor/github.com/opencontainers/runc/vendor.conf +++ b/vendor/github.com/opencontainers/runc/vendor.conf @@ -6,8 +6,8 @@ github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 github.com/checkpoint-restore/go-criu v3.11 github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08 github.com/opencontainers/selinux v1.2.2 -github.com/seccomp/libseccomp-golang 84e90a91acea0f4e51e62bc1a75de18b1fc0790f -github.com/sirupsen/logrus a3f95b5c423586578a4e099b11a46c2479628cac +github.com/seccomp/libseccomp-golang v0.9.1 +github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270 # systemd integration.
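The nsexec.c changes earlier in this patch drop the old SYNC_ERR error mirroring and instead write structured log lines to a pipe whose file descriptor is passed in the _LIBCONTAINER_LOGPIPE environment variable, one JSON object per line with level and msg fields. A minimal Go-side reader for that format, as a sketch only (the stdin wiring and names are assumptions, not runc's actual log forwarder):

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// logEntry mirrors the lines nsexec.c emits, e.g.
// {"level":"fatal", "msg": "nsexec:123 could not ..."}.
type logEntry struct {
	Level string `json:"level"`
	Msg   string `json:"msg"`
}

// forwardLogs decodes each JSON line and prints it; malformed lines are skipped.
func forwardLogs(pipe *os.File) {
	s := bufio.NewScanner(pipe)
	for s.Scan() {
		var e logEntry
		if err := json.Unmarshal(s.Bytes(), &e); err != nil {
			continue
		}
		fmt.Printf("[%s] %s\n", e.Level, e.Msg)
	}
}

func main() {
	// runc passes the write end's fd number to the C code via
	// _LIBCONTAINER_LOGPIPE; for this sketch we simply read from stdin.
	forwardLogs(os.Stdin)
}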