Update containerd to 5222236c1b

Signed-off-by: Lantao Liu <lantaol@google.com>

This commit is contained in: parent 29d5eb69bc, commit 27de1a5862
vendor.conf (13 lines changed)

@@ -21,11 +21,12 @@ github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
 github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
 github.com/pkg/errors v0.8.1
 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-github.com/opencontainers/runc v1.0.0-rc8
+github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86
 github.com/opencontainers/image-spec v1.0.1
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/grpc-ecosystem/go-grpc-prometheus v1.1
+github.com/google/uuid v1.1.1
 github.com/golang/protobuf v1.2.0
 github.com/gogo/protobuf v1.2.1
 github.com/gogo/googleapis v1.2.0
@@ -35,16 +36,16 @@ github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/coreos/go-systemd v14
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
-github.com/containerd/ttrpc a5bd8ce9e40bc7c065a11c6936f4d032ce6bfa2b
+github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a
-github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
+github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
-github.com/containerd/containerd 31afff294400b5a69bdb3ec387ecdf5bad57a038
+github.com/containerd/containerd 5222236c1b57d055362dbc413d042ab56427270a
 github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
-github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
+github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
-github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac
+github.com/Microsoft/go-winio v0.4.14
 github.com/BurntSushi/toml v0.3.1

 # kubernetes dependencies
vendor/github.com/Microsoft/go-winio/file.go (generated, vendored, 18 lines changed)

@@ -16,6 +16,7 @@ import (
 //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
 //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
 //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

 type atomicBool int32

@@ -79,6 +80,7 @@ type win32File struct {
 	wg            sync.WaitGroup
 	wgLock        sync.RWMutex
 	closing       atomicBool
+	socket        bool
 	readDeadline  deadlineHandler
 	writeDeadline deadlineHandler
 }
@@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
 }

 func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
-	return makeWin32File(h)
+	// If we return the result of makeWin32File directly, it can result in an
+	// interface-wrapped nil, rather than a nil interface value.
+	f, err := makeWin32File(h)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
 }

 // closeHandle closes the resources associated with a Win32 handle
@@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
 			if f.closing.isSet() {
 				err = ErrFileClosed
 			}
+		} else if err != nil && f.socket {
+			// err is from Win32. Query the overlapped structure to get the winsock error.
+			var bytes, flags uint32
+			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
 		}
 	case <-timeout:
 		cancelIoEx(f.handle, &c.o)
@@ -265,6 +277,10 @@ func (f *win32File) Flush() error {
 	return syscall.FlushFileBuffers(f.handle)
 }

+func (f *win32File) Fd() uintptr {
+	return uintptr(f.handle)
+}
+
 func (d *deadlineHandler) set(deadline time.Time) error {
 	d.setLock.Lock()
 	defer d.setLock.Unlock()
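Note: the MakeOpenFile change above works around Go's "typed nil inside an interface" behavior. A minimal, self-contained sketch of the pitfall (names here are illustrative, not from go-winio):

package main

import (
	"fmt"
	"io"
)

type file struct{}

func (f *file) Close() error { return nil }

// open returns a typed nil pointer on failure.
func open(fail bool) *file {
	if fail {
		return nil
	}
	return &file{}
}

func main() {
	var c io.Closer = open(true) // c now wraps a (*file)(nil)
	fmt.Println(c == nil)        // false: the interface carries a concrete type, so it is not nil
}

Checking the returned value explicitly (as the new MakeOpenFile body does) is what keeps callers' `== nil` comparisons working.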
vendor/github.com/Microsoft/go-winio/go.mod (generated, vendored, new file, 9 lines)

module github.com/Microsoft/go-winio

go 1.12

require (
	github.com/pkg/errors v0.8.1
	github.com/sirupsen/logrus v1.4.1
	golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
)
vendor/github.com/Microsoft/go-winio/hvsock.go (generated, vendored, new file, 305 lines)

package winio

import (
	"fmt"
	"io"
	"net"
	"os"
	"syscall"
	"time"
	"unsafe"

	"github.com/Microsoft/go-winio/pkg/guid"
)

//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind

const (
	afHvSock = 34 // AF_HYPERV

	socketError = ^uintptr(0)
)

// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
	VMID      guid.GUID
	ServiceID guid.GUID
}

type rawHvsockAddr struct {
	Family    uint16
	_         uint16
	VMID      guid.GUID
	ServiceID guid.GUID
}

// Network returns the address's network name, "hvsock".
func (addr *HvsockAddr) Network() string {
	return "hvsock"
}

func (addr *HvsockAddr) String() string {
	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}

// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
	g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	g.Data1 = port
	return g
}

func (addr *HvsockAddr) raw() rawHvsockAddr {
	return rawHvsockAddr{
		Family:    afHvSock,
		VMID:      addr.VMID,
		ServiceID: addr.ServiceID,
	}
}

func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
	addr.VMID = raw.VMID
	addr.ServiceID = raw.ServiceID
}

// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
	sock *win32File
	addr HvsockAddr
}

// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
	sock          *win32File
	local, remote HvsockAddr
}

func newHvSocket() (*win32File, error) {
	fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
	if err != nil {
		return nil, os.NewSyscallError("socket", err)
	}
	f, err := makeWin32File(fd)
	if err != nil {
		syscall.Close(fd)
		return nil, err
	}
	f.socket = true
	return f, nil
}

// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
	l := &HvsockListener{addr: *addr}
	sock, err := newHvSocket()
	if err != nil {
		return nil, l.opErr("listen", err)
	}
	sa := addr.raw()
	err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
	if err != nil {
		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
	}
	err = syscall.Listen(sock.handle, 16)
	if err != nil {
		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
	}
	return &HvsockListener{sock: sock, addr: *addr}, nil
}

func (l *HvsockListener) opErr(op string, err error) error {
	return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}

// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
	return &l.addr
}

// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
	sock, err := newHvSocket()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()
	c, err := l.sock.prepareIo()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer l.sock.wg.Done()

	// AcceptEx, per documentation, requires an extra 16 bytes per address.
	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
	var addrbuf [addrlen * 2]byte

	var bytes uint32
	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
	_, err = l.sock.asyncIo(c, nil, bytes, err)
	if err != nil {
		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
	}
	conn := &HvsockConn{
		sock: sock,
	}
	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
	sock = nil
	return conn, nil
}

// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
	return l.sock.Close()
}

/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
	sock, err := newHvSocket()
	if err != nil {
		return nil, err
	}
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()
	c, err := sock.prepareIo()
	if err != nil {
		return nil, err
	}
	defer sock.wg.Done()
	var bytes uint32
	err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
	_, err = sock.asyncIo(ctx, c, nil, bytes, err)
	if err != nil {
		return nil, err
	}
	conn := &HvsockConn{
		sock:   sock,
		remote: *addr,
	}
	sock = nil
	return conn, nil
}
*/

func (conn *HvsockConn) opErr(op string, err error) error {
	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}

func (conn *HvsockConn) Read(b []byte) (int, error) {
	c, err := conn.sock.prepareIo()
	if err != nil {
		return 0, conn.opErr("read", err)
	}
	defer conn.sock.wg.Done()
	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var flags, bytes uint32
	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
	n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
	if err != nil {
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError("wsarecv", err)
		}
		return 0, conn.opErr("read", err)
	} else if n == 0 {
		err = io.EOF
	}
	return n, err
}

func (conn *HvsockConn) Write(b []byte) (int, error) {
	t := 0
	for len(b) != 0 {
		n, err := conn.write(b)
		if err != nil {
			return t + n, err
		}
		t += n
		b = b[n:]
	}
	return t, nil
}

func (conn *HvsockConn) write(b []byte) (int, error) {
	c, err := conn.sock.prepareIo()
	if err != nil {
		return 0, conn.opErr("write", err)
	}
	defer conn.sock.wg.Done()
	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var bytes uint32
	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
	n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
	if err != nil {
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError("wsasend", err)
		}
		return 0, conn.opErr("write", err)
	}
	return n, err
}

// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
	return conn.sock.Close()
}

func (conn *HvsockConn) shutdown(how int) error {
	err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
	if err != nil {
		return os.NewSyscallError("shutdown", err)
	}
	return nil
}

// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
	err := conn.shutdown(syscall.SHUT_RD)
	if err != nil {
		return conn.opErr("close", err)
	}
	return nil
}

// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
	err := conn.shutdown(syscall.SHUT_WR)
	if err != nil {
		return conn.opErr("close", err)
	}
	return nil
}

// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
	return &conn.local
}

// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
	return &conn.remote
}

// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
	conn.SetReadDeadline(t)
	conn.SetWriteDeadline(t)
	return nil
}

// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
	return conn.sock.SetReadDeadline(t)
}

// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
	return conn.sock.SetWriteDeadline(t)
}
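Note: a hedged usage sketch of the new hvsock API, using only identifiers added in this file (HvsockAddr, VsockServiceID, ListenHvsock, Accept); the zero VMID and the port number are placeholders, not values taken from this commit:

// Listen for AF_HYPERV connections on a service ID derived from an
// AF_VSOCK-style port number, then accept net.Conn connections.
addr := &winio.HvsockAddr{
	VMID:      guid.GUID{},                // placeholder VM ID
	ServiceID: winio.VsockServiceID(8080), // placeholder port
}
l, err := winio.ListenHvsock(addr)
if err != nil {
	log.Fatal(err)
}
defer l.Close()
for {
	conn, err := l.Accept() // net.Conn backed by *HvsockConn
	if err != nil {
		break
	}
	go io.Copy(conn, conn) // echo, for illustration only
}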
vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go (generated, vendored, 13 lines changed)

@@ -7,9 +7,14 @@
 // set of C macros.
 package etw

-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go
+//go:generate go run mksyscall_windows.go -output zsyscall_windows.go etw.go

 //sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
-//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
-//sys eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
-//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
+
+//sys eventUnregister_64(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
+//sys eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
+//sys eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
+
+//sys eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) = advapi32.EventUnregister
+//sys eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
+//sys eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go (generated, vendored, 6 lines changed)

@@ -3,6 +3,7 @@ package etw
 import (
 	"bytes"
 	"encoding/binary"
+	"syscall"
 )

 // eventData maintains a buffer which builds up the data for an ETW event. It
@@ -63,3 +64,8 @@ func (ed *eventData) writeUint32(value uint32) {
 func (ed *eventData) writeUint64(value uint64) {
 	binary.Write(&ed.buffer, binary.LittleEndian, value)
 }
+
+// writeFiletime appends a FILETIME to the buffer.
+func (ed *eventData) writeFiletime(value syscall.Filetime) {
+	binary.Write(&ed.buffer, binary.LittleEndian, value)
+}
vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go (generated, vendored, 8 lines changed)

@@ -6,8 +6,8 @@ import (

 type eventOptions struct {
 	descriptor        *eventDescriptor
-	activityID        *guid.GUID
-	relatedActivityID *guid.GUID
+	activityID        guid.GUID
+	relatedActivityID guid.GUID
 	tags              uint32
 }

@@ -59,14 +59,14 @@ func WithTags(newTags uint32) EventOpt {
 }

 // WithActivityID specifies the activity ID of the event to be written.
-func WithActivityID(activityID *guid.GUID) EventOpt {
+func WithActivityID(activityID guid.GUID) EventOpt {
 	return func(options *eventOptions) {
 		options.activityID = activityID
 	}
 }

 // WithRelatedActivityID specifies the parent activity ID of the event to be written.
-func WithRelatedActivityID(activityID *guid.GUID) EventOpt {
+func WithRelatedActivityID(activityID guid.GUID) EventOpt {
 	return func(options *eventOptions) {
 		options.relatedActivityID = activityID
 	}
vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go (generated, vendored, 12 lines changed)

@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"math"
 	"reflect"
+	"syscall"
+	"time"
 	"unsafe"
 )

@@ -380,6 +382,14 @@ func Struct(name string, opts ...FieldOpt) FieldOpt {
 	}
 }

+// Time adds a time to the event.
+func Time(name string, value time.Time) FieldOpt {
+	return func(em *eventMetadata, ed *eventData) {
+		em.writeField(name, inTypeFileTime, outTypeDateTimeUTC, 0)
+		ed.writeFiletime(syscall.NsecToFiletime(value.UTC().UnixNano()))
+	}
+}
+
 // Currently, we support logging basic builtin types (int, string, etc), slices
 // of basic builtin types, error, types derived from the basic types (e.g. "type
 // foo int"), and structs (recursively logging their fields). We do not support
@@ -454,6 +464,8 @@ func SmartField(name string, v interface{}) FieldOpt {
 		return Float64Array(name, v)
 	case error:
 		return StringField(name, v.Error())
+	case time.Time:
+		return Time(name, v)
 	default:
 		switch rv := reflect.ValueOf(v); rv.Kind() {
 		case reflect.Bool:
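Note: the new Time field option relies on FILETIME being 100-nanosecond ticks since January 1, 1601 (UTC). A small Windows-only sketch of the conversion used above, via the standard syscall package:

now := time.Now().UTC()
ft := syscall.NsecToFiletime(now.UnixNano()) // what etw.Time writes for the field payload
back := time.Unix(0, ft.Nanoseconds()).UTC() // equal to now, at 100 ns resolution
_ = back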
vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go (generated, vendored, new file, 53 lines)

// +build amd64 arm64 386

package etw

import (
	"bytes"
	"encoding/binary"
	"unsafe"

	"github.com/Microsoft/go-winio/pkg/guid"
	"golang.org/x/sys/windows"
)

// NewProviderWithID creates and registers a new ETW provider, allowing the
// provider ID to be manually specified. This is most useful when there is an
// existing provider ID that must be used to conform to existing diagnostic
// infrastructure.
func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) {
	providerCallbackOnce.Do(func() {
		globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
	})

	provider = providers.newProvider()
	defer func(provider *Provider) {
		if err != nil {
			providers.removeProvider(provider)
		}
	}(provider)
	provider.ID = id
	provider.callback = callback

	if err := eventRegister((*windows.GUID)(&provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
		return nil, err
	}

	metadata := &bytes.Buffer{}
	binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
	metadata.WriteString(name)
	metadata.WriteByte(0)                                                   // Null terminator for name
	binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
	provider.metadata = metadata.Bytes()

	if err := eventSetInformation(
		provider.handle,
		eventInfoClassProviderSetTraits,
		uintptr(unsafe.Pointer(&provider.metadata[0])),
		uint32(len(provider.metadata))); err != nil {

		return nil, err
	}

	return provider, nil
}
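Note: a worked example of the provider-traits blob built above (layout: little-endian uint16 total size, provider name, NUL terminator). For the hypothetical name "MyProvider" the blob is 2 + 10 + 1 = 13 bytes and begins 0x0d 0x00:

metadata := &bytes.Buffer{}
binary.Write(metadata, binary.LittleEndian, uint16(0)) // size placeholder
metadata.WriteString("MyProvider")
metadata.WriteByte(0) // NUL terminator
binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len()))
// metadata.Len() == 13; metadata.Bytes()[0:2] == []byte{0x0d, 0x00}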
vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go (generated, vendored, new file, 12 lines)

// +build arm

package etw

import (
	"github.com/Microsoft/go-winio/pkg/guid"
)

// NewProviderWithID returns a nil provider on unsupported platforms.
func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) {
	return nil, nil
}
vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go (generated, vendored, 104 lines changed)

@@ -1,12 +1,10 @@
 package etw

 import (
-	"bytes"
 	"crypto/sha1"
 	"encoding/binary"
 	"strings"
 	"unicode/utf16"
-	"unsafe"

 	"github.com/Microsoft/go-winio/pkg/guid"
 	"golang.org/x/sys/windows"
@@ -16,7 +14,7 @@ import (
 // name and ID (GUID), which should always have a 1:1 mapping to each other
 // (e.g. don't use multiple provider names with the same ID, or vice versa).
 type Provider struct {
-	ID       *guid.GUID
+	ID       guid.GUID
 	handle   providerHandle
 	metadata []byte
 	callback EnableCallback
@@ -29,10 +27,14 @@ type Provider struct {

 // String returns the `provider`.ID as a string
 func (provider *Provider) String() string {
+	if provider == nil {
+		return "<nil>"
+	}
+
 	return provider.ID.String()
 }

-type providerHandle windows.Handle
+type providerHandle uint64

 // ProviderState informs the provider EnableCallback what action is being
 // performed.
@@ -59,9 +61,9 @@ const (

 // EnableCallback is the form of the callback function that receives provider
 // enable/disable notifications from ETW.
-type EnableCallback func(*guid.GUID, ProviderState, Level, uint64, uint64, uintptr)
+type EnableCallback func(guid.GUID, ProviderState, Level, uint64, uint64, uintptr)

-func providerCallback(sourceID *guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
+func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
 	provider := providers.getProvider(uint(i))

 	switch state {
@@ -84,7 +86,7 @@ func providerCallback(sourceID *guid.GUID, state ProviderState, level Level, mat
 // different size, it has only pointer-sized arguments, which are then cast to
 // the appropriate types when calling providerCallback.
 func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr {
-	providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
+	providerCallback(*sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
 	return 0
 }

@@ -92,26 +94,27 @@ func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr,
 // uses the same algorithm as used by .NET's EventSource class, which is based
 // on RFC 4122. More information on the algorithm can be found here:
 // https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/
-// The algorithm is roughly:
-// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be())
-// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122
-func providerIDFromName(name string) *guid.GUID {
+//
+// The algorithm is roughly the RFC 4122 algorithm for a V5 UUID, but differs in
+// the following ways:
+// - The input name is first upper-cased, UTF16-encoded, and converted to
+//   big-endian.
+// - No variant is set on the result UUID.
+// - The result UUID is treated as being in little-endian format, rather than
+//   big-endian.
+func providerIDFromName(name string) guid.GUID {
 	buffer := sha1.New()
-	namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}
-	buffer.Write(namespace)
+	namespace := guid.GUID{0x482C2DB2, 0xC390, 0x47C8, [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}}
+	namespaceBytes := namespace.ToArray()
+	buffer.Write(namespaceBytes[:])

 	binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name))))

 	sum := buffer.Sum(nil)
 	sum[7] = (sum[7] & 0xf) | 0x50

-	return &guid.GUID{
-		Data1: binary.LittleEndian.Uint32(sum[0:4]),
-		Data2: binary.LittleEndian.Uint16(sum[4:6]),
-		Data3: binary.LittleEndian.Uint16(sum[6:8]),
-		Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]},
-	}
+	a := [16]byte{}
+	copy(a[:], sum)
+	return guid.FromWindowsArray(a)
 }

 // NewProvider creates and registers a new ETW provider. The provider ID is
@@ -120,49 +123,12 @@ func NewProvider(name string, callback EnableCallback) (provider *Provider, err
 	return NewProviderWithID(name, providerIDFromName(name), callback)
 }

-// NewProviderWithID creates and registers a new ETW provider, allowing the
-// provider ID to be manually specified. This is most useful when there is an
-// existing provider ID that must be used to conform to existing diagnostic
-// infrastructure.
-func NewProviderWithID(name string, id *guid.GUID, callback EnableCallback) (provider *Provider, err error) {
-	providerCallbackOnce.Do(func() {
-		globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
-	})
-
-	provider = providers.newProvider()
-	defer func() {
-		if err != nil {
-			providers.removeProvider(provider)
-		}
-	}()
-	provider.ID = id
-	provider.callback = callback
-
-	if err := eventRegister((*windows.GUID)(provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
-		return nil, err
-	}
-
-	metadata := &bytes.Buffer{}
-	binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
-	metadata.WriteString(name)
-	metadata.WriteByte(0) // Null terminator for name
-	binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
-	provider.metadata = metadata.Bytes()
-
-	if err := eventSetInformation(
-		provider.handle,
-		eventInfoClassProviderSetTraits,
-		uintptr(unsafe.Pointer(&provider.metadata[0])),
-		uint32(len(provider.metadata))); err != nil {
-
-		return nil, err
-	}
-
-	return provider, nil
-}
-
 // Close unregisters the provider.
 func (provider *Provider) Close() error {
+	if provider == nil {
+		return nil
+	}
+
 	providers.removeProvider(provider)
 	return eventUnregister(provider.handle)
 }
@@ -185,6 +151,10 @@ func (provider *Provider) IsEnabledForLevel(level Level) bool {
 // infrastructure, it can be useful to check if an event will actually be
 // consumed before doing expensive work to build the event data.
 func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool {
+	if provider == nil {
+		return false
+	}
+
 	if !provider.enabled {
 		return false
 	}
@@ -206,6 +176,10 @@ func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uin
 // constructed based on the EventOpt and FieldOpt values that are passed as
 // opts.
 func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error {
+	if provider == nil {
+		return nil
+	}
+
 	options := eventOptions{descriptor: newEventDescriptor()}
 	em := &eventMetadata{}
 	ed := &eventData{}
@@ -246,8 +220,8 @@ func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpt
 // the ETW infrastructure.
 func (provider *Provider) writeEventRaw(
 	descriptor *eventDescriptor,
-	activityID *guid.GUID,
-	relatedActivityID *guid.GUID,
+	activityID guid.GUID,
+	relatedActivityID guid.GUID,
 	metadataBlobs [][]byte,
 	dataBlobs [][]byte) error {

@@ -262,5 +236,5 @@ func (provider *Provider) writeEventRaw(
 		dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob))
 	}

-	return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(activityID), (*windows.GUID)(relatedActivityID), dataDescriptorCount, &dataDescriptors[0])
+	return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(&activityID), (*windows.GUID)(&relatedActivityID), dataDescriptorCount, &dataDescriptors[0])
 }
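Note: a hedged usage sketch assembled from signatures visible in this diff (NewProvider, EnableCallback, WriteEvent, WithEventOpts, WithLevel, StringField, SmartField); the provider name, the field values, and the LevelInfo constant name are assumptions, not values taken from this commit:

provider, err := etw.NewProvider("Example-Provider", func(sourceID guid.GUID,
	state etw.ProviderState, level etw.Level, matchAnyKeyword, matchAllKeyword uint64,
	filterData uintptr) {
	// React to enable/disable notifications here if needed.
})
if err != nil {
	log.Fatal(err)
}
defer provider.Close()

_ = provider.WriteEvent(
	"ExampleEvent",
	etw.WithEventOpts(etw.WithLevel(etw.LevelInfo)), // level constant name assumed
	[]etw.FieldOpt{
		etw.StringField("Message", "hello"),
		etw.SmartField("Count", 42),
	},
)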
vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go (generated, vendored, new file, 51 lines)

// +build 386 arm

package etw

import (
	"golang.org/x/sys/windows"
)

func low(v providerHandle) uint32 {
	return uint32(v & 0xffffffff)
}

func high(v providerHandle) uint32 {
	return low(v >> 32)
}

func eventUnregister(providerHandle providerHandle) (win32err error) {
	return eventUnregister_32(low(providerHandle), high(providerHandle))
}

func eventWriteTransfer(
	providerHandle providerHandle,
	descriptor *eventDescriptor,
	activityID *windows.GUID,
	relatedActivityID *windows.GUID,
	dataDescriptorCount uint32,
	dataDescriptors *eventDataDescriptor) (win32err error) {

	return eventWriteTransfer_32(
		low(providerHandle),
		high(providerHandle),
		descriptor,
		activityID,
		relatedActivityID,
		dataDescriptorCount,
		dataDescriptors)
}

func eventSetInformation(
	providerHandle providerHandle,
	class eventInfoClass,
	information uintptr,
	length uint32) (win32err error) {

	return eventSetInformation_32(
		low(providerHandle),
		high(providerHandle),
		class,
		information,
		length)
}
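Note: the split above exists because 32-bit Windows passes a 64-bit argument to advapi32 as two 32-bit stack slots. A worked example of the helpers defined in this file (in-package sketch):

h := providerHandle(0x0000000200000001)
_ = low(h)  // 0x00000001
_ = high(h) // 0x00000002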
vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go (generated, vendored, new file, 41 lines)

// +build amd64 arm64

package etw

import (
	"golang.org/x/sys/windows"
)

func eventUnregister(providerHandle providerHandle) (win32err error) {
	return eventUnregister_64(providerHandle)
}

func eventWriteTransfer(
	providerHandle providerHandle,
	descriptor *eventDescriptor,
	activityID *windows.GUID,
	relatedActivityID *windows.GUID,
	dataDescriptorCount uint32,
	dataDescriptors *eventDataDescriptor) (win32err error) {

	return eventWriteTransfer_64(
		providerHandle,
		descriptor,
		activityID,
		relatedActivityID,
		dataDescriptorCount,
		dataDescriptors)
}

func eventSetInformation(
	providerHandle providerHandle,
	class eventInfoClass,
	information uintptr,
	length uint32) (win32err error) {

	return eventSetInformation_64(
		providerHandle,
		class,
		information,
		length)
}
vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go (generated, vendored, 30 lines changed)

@@ -53,7 +53,7 @@ func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext u
 	return
 }

-func eventUnregister(providerHandle providerHandle) (win32err error) {
+func eventUnregister_64(providerHandle providerHandle) (win32err error) {
 	r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0)
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
@@ -61,7 +61,7 @@ func eventUnregister(providerHandle providerHandle) (win32err error) {
 	return
 }

-func eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
+func eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
 	r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
@@ -69,10 +69,34 @@ func eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescript
 	return
 }

-func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
+func eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
 	r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0)
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
 	return
 }
+
+func eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) {
+	r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 2, uintptr(providerHandle_low), uintptr(providerHandle_high), 0)
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
+	}
+	return
+}
+
+func eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
+	r0, _, _ := syscall.Syscall9(procEventWriteTransfer.Addr(), 7, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)), 0, 0)
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
+	}
+	return
+}
+
+func eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) {
+	r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 5, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(class), uintptr(information), uintptr(length), 0)
+	if r0 != 0 {
+		win32err = syscall.Errno(r0)
+	}
+	return
+}
vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go (generated, vendored, 47 lines changed)

@@ -1,6 +1,8 @@
 package etwlogrus

 import (
+	"sort"
+
 	"github.com/Microsoft/go-winio/pkg/etw"
 	"github.com/sirupsen/logrus"
 )
@@ -31,15 +33,7 @@ func NewHookFromProvider(provider *etw.Provider) (*Hook, error) {
 // Levels returns the set of levels that this hook wants to receive log entries
 // for.
 func (h *Hook) Levels() []logrus.Level {
-	return []logrus.Level{
-		logrus.TraceLevel,
-		logrus.DebugLevel,
-		logrus.InfoLevel,
-		logrus.WarnLevel,
-		logrus.ErrorLevel,
-		logrus.FatalLevel,
-		logrus.PanicLevel,
-	}
+	return logrus.AllLevels
 }

 var logrusToETWLevelMap = map[logrus.Level]etw.Level{
@@ -62,19 +56,42 @@ func (h *Hook) Fire(e *logrus.Entry) error {
 		return nil
 	}

-	// Reserve extra space for the message field.
-	fields := make([]etw.FieldOpt, 0, len(e.Data)+1)
+	// Sort the fields by name so they are consistent in each instance
+	// of an event. Otherwise, the fields don't line up in WPA.
+	names := make([]string, 0, len(e.Data))
+	hasError := false
+	for k := range e.Data {
+		if k == logrus.ErrorKey {
+			// Always put the error last because it is optional in some events.
+			hasError = true
+		} else {
+			names = append(names, k)
+		}
+	}
+	sort.Strings(names)
+
+	// Reserve extra space for the message and time fields.
+	fields := make([]etw.FieldOpt, 0, len(e.Data)+2)
 	fields = append(fields, etw.StringField("Message", e.Message))
-	for k, v := range e.Data {
-		fields = append(fields, etw.SmartField(k, v))
+	fields = append(fields, etw.Time("Time", e.Time))
+	for _, k := range names {
+		fields = append(fields, etw.SmartField(k, e.Data[k]))
+	}
+	if hasError {
+		fields = append(fields, etw.SmartField(logrus.ErrorKey, e.Data[logrus.ErrorKey]))
 	}

-	return h.provider.WriteEvent(
+	// Firing an ETW event is essentially best effort, as the event write can
+	// fail for reasons completely out of the control of the event writer (such
+	// as a session listening for the event having no available space in its
+	// buffers). Therefore, we don't return the error from WriteEvent, as it is
+	// just noise in many cases.
+	h.provider.WriteEvent(
 		"LogrusEntry",
 		etw.WithEventOpts(etw.WithLevel(level)),
 		fields)
+
+	return nil
 }

 // Close cleans up the hook and closes the ETW provider. If the provder was
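Note: a hedged sketch of wiring this hook into logrus, using only names that appear in this diff (etw.NewProvider, etwlogrus.NewHookFromProvider, the "LogrusEntry" event name); error handling is trimmed, the provider name is a placeholder, and a nil EnableCallback is assumed to be acceptable:

provider, _ := etw.NewProvider("Example-Provider", nil) // nil callback assumed acceptable
hook, _ := etwlogrus.NewHookFromProvider(provider)
logrus.AddHook(hook)
logrus.WithField("answer", 42).Info("hello") // emitted as a "LogrusEntry" ETW event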
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go (generated, vendored, 191 lines changed)

@@ -1,19 +1,43 @@
+// Package guid provides a GUID type. The backing structure for a GUID is
+// identical to that used by the golang.org/x/sys/windows GUID type.
+// There are two main binary encodings used for a GUID, the big-endian encoding,
+// and the Windows (mixed-endian) encoding. See here for details:
+// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
 package guid

 import (
 	"crypto/rand"
+	"crypto/sha1"
+	"encoding"
 	"encoding/binary"
-	"encoding/json"
 	"fmt"
 	"strconv"
-	"strings"

-	"github.com/pkg/errors"
 	"golang.org/x/sys/windows"
 )

-var _ = (json.Marshaler)(&GUID{})
-var _ = (json.Unmarshaler)(&GUID{})
+// Variant specifies which GUID variant (or "type") of the GUID. It determines
+// how the entirety of the rest of the GUID is interpreted.
+type Variant uint8
+
+// The variants specified by RFC 4122.
+const (
+	// VariantUnknown specifies a GUID variant which does not conform to one of
+	// the variant encodings specified in RFC 4122.
+	VariantUnknown Variant = iota
+	VariantNCS
+	VariantRFC4122
+	VariantMicrosoft
+	VariantFuture
+)
+
+// Version specifies how the bits in the GUID were generated. For instance, a
+// version 4 GUID is randomly generated, and a version 5 is generated from the
+// hash of an input string.
+type Version uint8
+
+var _ = (encoding.TextMarshaler)(GUID{})
+var _ = (encoding.TextUnmarshaler)(&GUID{})
@@ -23,24 +47,83 @@ var _ = (json.Unmarshaler)(&GUID{})
 // GUID represents a GUID/UUID. It has the same structure as
 // golang.org/x/sys/windows.GUID so that it can be used with functions expecting
 type GUID windows.GUID

 // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
-func NewV4() (*GUID, error) {
+func NewV4() (GUID, error) {
 	var b [16]byte
 	if _, err := rand.Read(b[:]); err != nil {
-		return nil, err
+		return GUID{}, err
 	}

-	var g GUID
-	g.Data1 = binary.LittleEndian.Uint32(b[0:4])
-	g.Data2 = binary.LittleEndian.Uint16(b[4:6])
-	g.Data3 = binary.LittleEndian.Uint16(b[6:8])
-	copy(g.Data4[:], b[8:16])
-
-	g.Data3 = (g.Data3 & 0x0fff) | 0x4000 // Version 4 (randomly generated)
-	g.Data4[0] = (g.Data4[0] & 0x3f) | 0x80 // RFC4122 variant
-	return &g, nil
+	g := FromArray(b)
+	g.setVersion(4) // Version 4 means randomly generated.
+	g.setVariant(VariantRFC4122)
+
+	return g, nil
 }

-func (g *GUID) String() string {
+// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
+// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
+// and the sample code treats it as a series of bytes, so we do the same here.
+//
+// Some implementations, such as those found on Windows, treat the name as a
+// big-endian UTF16 stream of bytes. If that is desired, the string can be
+// encoded as such before being passed to this function.
+func NewV5(namespace GUID, name []byte) (GUID, error) {
+	b := sha1.New()
+	namespaceBytes := namespace.ToArray()
+	b.Write(namespaceBytes[:])
+	b.Write(name)
+
+	a := [16]byte{}
+	copy(a[:], b.Sum(nil))
+
+	g := FromArray(a)
+	g.setVersion(5) // Version 5 means generated from a string.
+	g.setVariant(VariantRFC4122)
+
+	return g, nil
+}
+
+func fromArray(b [16]byte, order binary.ByteOrder) GUID {
+	var g GUID
+	g.Data1 = order.Uint32(b[0:4])
+	g.Data2 = order.Uint16(b[4:6])
+	g.Data3 = order.Uint16(b[6:8])
+	copy(g.Data4[:], b[8:16])
+	return g
+}
+
+func (g GUID) toArray(order binary.ByteOrder) [16]byte {
+	b := [16]byte{}
+	order.PutUint32(b[0:4], g.Data1)
+	order.PutUint16(b[4:6], g.Data2)
+	order.PutUint16(b[6:8], g.Data3)
+	copy(b[8:16], g.Data4[:])
+	return b
+}
+
+// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
+func FromArray(b [16]byte) GUID {
+	return fromArray(b, binary.BigEndian)
+}
+
+// ToArray returns an array of 16 bytes representing the GUID in big-endian
+// encoding.
+func (g GUID) ToArray() [16]byte {
+	return g.toArray(binary.BigEndian)
+}
+
+// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
+func FromWindowsArray(b [16]byte) GUID {
+	return fromArray(b, binary.LittleEndian)
+}
+
+// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
+// encoding.
+func (g GUID) ToWindowsArray() [16]byte {
+	return g.toArray(binary.LittleEndian)
+}
+
+func (g GUID) String() string {
 	return fmt.Sprintf(
 		"%08x-%04x-%04x-%04x-%012x",
 		g.Data1,
@@ -53,58 +136,100 @@ func (g *GUID) String() string {
 // FromString parses a string containing a GUID and returns the GUID. The only
 // format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
 // format.
-func FromString(s string) (*GUID, error) {
+func FromString(s string) (GUID, error) {
 	if len(s) != 36 {
-		return nil, errors.New("invalid GUID format (length)")
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
 	}
 	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
-		return nil, errors.New("invalid GUID format (dashes)")
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
 	}

 	var g GUID

 	data1, err := strconv.ParseUint(s[0:8], 16, 32)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid GUID format (Data1)")
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
 	}
 	g.Data1 = uint32(data1)

 	data2, err := strconv.ParseUint(s[9:13], 16, 16)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid GUID format (Data2)")
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
 	}
 	g.Data2 = uint16(data2)

 	data3, err := strconv.ParseUint(s[14:18], 16, 16)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid GUID format (Data3)")
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
 	}
 	g.Data3 = uint16(data3)

 	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
 		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
 		if err != nil {
-			return nil, errors.Wrap(err, "invalid GUID format (Data4)"
|
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||||
}
|
}
|
||||||
g.Data4[i] = uint8(v)
|
g.Data4[i] = uint8(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &g, nil
|
return g, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the GUID to JSON representation and returns it as a
|
func (g *GUID) setVariant(v Variant) {
|
||||||
// slice of bytes.
|
d := g.Data4[0]
|
||||||
func (g *GUID) MarshalJSON() ([]byte, error) {
|
switch v {
|
||||||
return json.Marshal(g.String())
|
case VariantNCS:
|
||||||
|
d = (d & 0x7f)
|
||||||
|
case VariantRFC4122:
|
||||||
|
d = (d & 0x3f) | 0x80
|
||||||
|
case VariantMicrosoft:
|
||||||
|
d = (d & 0x1f) | 0xc0
|
||||||
|
case VariantFuture:
|
||||||
|
d = (d & 0x0f) | 0xe0
|
||||||
|
case VariantUnknown:
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid variant: %d", v))
|
||||||
|
}
|
||||||
|
g.Data4[0] = d
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalJSON unmarshals a GUID from JSON representation and sets itself to
|
// Variant returns the GUID variant, as defined in RFC 4122.
|
||||||
// the unmarshaled GUID.
|
func (g GUID) Variant() Variant {
|
||||||
func (g *GUID) UnmarshalJSON(data []byte) error {
|
b := g.Data4[0]
|
||||||
g2, err := FromString(strings.Trim(string(data), "\""))
|
if b&0x80 == 0 {
|
||||||
|
return VariantNCS
|
||||||
|
} else if b&0xc0 == 0x80 {
|
||||||
|
return VariantRFC4122
|
||||||
|
} else if b&0xe0 == 0xc0 {
|
||||||
|
return VariantMicrosoft
|
||||||
|
} else if b&0xe0 == 0xe0 {
|
||||||
|
return VariantFuture
|
||||||
|
}
|
||||||
|
return VariantUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GUID) setVersion(v Version) {
|
||||||
|
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the GUID version, as defined in RFC 4122.
|
||||||
|
func (g GUID) Version() Version {
|
||||||
|
return Version((g.Data3 & 0xF000) >> 12)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText returns the textual representation of the GUID.
|
||||||
|
func (g GUID) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(g.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
|
||||||
|
// into this GUID.
|
||||||
|
func (g *GUID) UnmarshalText(text []byte) error {
|
||||||
|
g2, err := FromString(string(text))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
*g = *g2
|
*g = g2
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
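Note: the switch from *GUID to a value-type GUID, plus the new Variant/Version accessors and text marshalling, changes how callers use the package. A minimal sketch of a consumer, assuming the package is imported from its upstream location at github.com/Microsoft/go-winio/pkg/guid (the import path is not shown in this diff) and built on Windows, where go-winio compiles:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid" // assumed import path; the diff only shows the file contents
)

func main() {
	// NewV4 now returns a GUID value instead of a *GUID.
	g, err := guid.NewV4()
	if err != nil {
		panic(err)
	}
	fmt.Println(g.String())                          // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	fmt.Println(g.Version())                         // 4, read back from the top nibble of Data3
	fmt.Println(g.Variant() == guid.VariantRFC4122)  // variant bits set by setVariant

	// Round-trip through text; MarshalText/UnmarshalText also let encoding/json
	// treat the GUID as a quoted string now that MarshalJSON is gone.
	b, _ := json.Marshal(g)
	var parsed guid.GUID
	if err := json.Unmarshal(b, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed == g) // value type, so plain == comparison works
}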
2
vendor/github.com/Microsoft/go-winio/syscall.go
generated
vendored
@@ -1,3 +1,3 @@
 package winio
 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
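Note: the added hvsock.go argument makes go generate scan the new Hyper-V socket source for //sys annotations when rebuilding zsyscall_windows.go. As a hedged illustration only (hvsock.go itself is not part of this diff, so the exact lines are assumed), annotations of roughly this shape are what produce the procbind/bind and procWSAGetOverlappedResult/wsaGetOverlappedResult wrappers seen in the regenerated file below:

// Assumed sketch of mksyscall_windows.go input, not a quote from hvsock.go.
package winio

//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult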
33
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@@ -38,6 +38,7 @@ func errnoErr(e syscall.Errno) error {
 
 var (
 	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+	modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")
 	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
 	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
 
@@ -45,6 +46,7 @@ var (
 	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
 	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
 	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procWSAGetOverlappedResult             = modws2_32.NewProc("WSAGetOverlappedResult")
 	procConnectNamedPipe                   = modkernel32.NewProc("ConnectNamedPipe")
 	procCreateNamedPipeW                   = modkernel32.NewProc("CreateNamedPipeW")
 	procCreateFileW                        = modkernel32.NewProc("CreateFileW")
@@ -73,6 +75,7 @@ var (
 	procLookupPrivilegeDisplayNameW        = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
 	procBackupRead                         = modkernel32.NewProc("BackupRead")
 	procBackupWrite                        = modkernel32.NewProc("BackupWrite")
+	procbind                               = modws2_32.NewProc("bind")
 )
 
 func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
@@ -124,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
 	return
 }
 
+func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+	var _p0 uint32
+	if wait {
+		_p0 = 1
+	} else {
+		_p0 = 0
+	}
+	r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
 func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
 	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
 	if r1 == 0 {
@@ -527,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
 	}
 	return
 }
+
+func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socketError {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
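Note: both new wrappers follow the usual mksyscall error convention. A small standalone restatement of that convention (simplified: the real wrappers go through errnoErr, which special-cases cached errno values), runnable on any platform:

package main

import (
	"fmt"
	"syscall"
)

// toError mirrors the error mapping used by the generated wrappers above:
// a failing return value with errno 0 is reported as EINVAL, otherwise the
// errno itself is returned.
func toError(failed bool, e1 syscall.Errno) error {
	if failed {
		if e1 != 0 {
			return e1
		}
		return syscall.EINVAL
	}
	return nil
}

func main() {
	fmt.Println(toError(false, 0))               // success -> <nil>
	fmt.Println(toError(true, syscall.Errno(5))) // failure with an errno
	fmt.Println(toError(true, 0))                // failure without an errno -> EINVAL
}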
1
vendor/github.com/containerd/cgroups/blkio.go
generated
vendored
@@ -86,6 +86,7 @@ func (b *blkioController) Stat(path string, stats *Metrics) error {
 	}
 	// Try to read CFQ stats available on all CFQ enabled kernels first
 	if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
+		settings = []blkioStatSettings{}
 		settings = append(settings,
 			blkioStatSettings{
 				name: "sectors_recursive",
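Note: the added reassignment makes the CFQ branch start from an empty settings slice instead of appending to whatever was collected before the os.Lstat probe. A small, self-contained sketch of the same probe-then-choose pattern (the path and struct below are illustrative, not the vendored implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

type statSetting struct{ name string }

func main() {
	base := "/sys/fs/cgroup/blkio" // illustrative cgroup path
	// Defaults used when only throttle statistics are available.
	settings := []statSetting{{name: "throttle.io_serviced"}, {name: "throttle.io_service_bytes"}}

	// If the CFQ recursive files exist, start over with the CFQ set rather
	// than appending it to the throttle-only defaults.
	if _, err := os.Lstat(filepath.Join(base, "blkio.io_serviced_recursive")); err == nil {
		settings = []statSetting{}
		settings = append(settings,
			statSetting{name: "sectors_recursive"},
			statSetting{name: "io_serviced_recursive"},
		)
	}
	fmt.Println(settings)
}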
3
vendor/github.com/containerd/cgroups/cgroup.go
generated
vendored
@@ -497,6 +497,9 @@ func (c *cgroup) MoveTo(destination Cgroup) error {
 	}
 	for _, p := range processes {
 		if err := destination.Add(p); err != nil {
+			if strings.Contains(err.Error(), "no such process") {
+				continue
+			}
 			return err
 		}
 	}
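Note: the new strings.Contains check lets MoveTo skip processes that exit between being listed and being re-attached, instead of failing the whole migration. A hedged sketch of exercising it through the public API, assuming the containerd/cgroups v1 interface vendored here (New, StaticPath, MoveTo) and a Linux host with the cgroup v1 hierarchy mounted:

package main

import (
	"log"

	"github.com/containerd/cgroups"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Create a source and a destination cgroup (paths are illustrative).
	src, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/demo-src"), &specs.LinuxResources{})
	if err != nil {
		log.Fatal(err)
	}
	dst, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/demo-dst"), &specs.LinuxResources{})
	if err != nil {
		log.Fatal(err)
	}

	// Processes that die mid-move are now skipped ("no such process")
	// rather than aborting the migration.
	if err := src.MoveTo(dst); err != nil {
		log.Fatal(err)
	}
}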
4
vendor/github.com/containerd/cgroups/memory.go
generated
vendored
@@ -281,6 +281,10 @@ func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
 			name:  "limit_in_bytes",
 			value: mem.Limit,
 		},
+		{
+			name:  "soft_limit_in_bytes",
+			value: mem.Reservation,
+		},
 		{
 			name:  "memsw.limit_in_bytes",
 			value: mem.Swap,
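Note: with the added entry, a memory reservation from the OCI spec is now written to the cgroup's soft limit instead of being dropped; the soft_limit_in_bytes name corresponds to the cgroup v1 memory.soft_limit_in_bytes file. A small sketch using the runtime-spec types vendored in this repository:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	limit := int64(256 << 20)       // hard limit: 256 MiB -> limit_in_bytes
	reservation := int64(128 << 20) // soft limit: 128 MiB -> soft_limit_in_bytes (new)

	res := &specs.LinuxResources{
		Memory: &specs.LinuxMemory{
			Limit:       &limit,
			Reservation: &reservation,
		},
	}
	fmt.Printf("limit=%d reservation=%d\n", *res.Memory.Limit, *res.Memory.Reservation)
}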
654
vendor/github.com/containerd/cgroups/metrics.pb.go
generated
vendored
@ -1,6 +1,5 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
// source: github.com/containerd/cgroups/metrics.proto
|
// source: github.com/containerd/cgroups/metrics.proto
|
||||||
// DO NOT EDIT!
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package cgroups is a generated protocol buffer package.
|
Package cgroups is a generated protocol buffer package.
|
||||||
@ -21,6 +20,7 @@
|
|||||||
BlkIOEntry
|
BlkIOEntry
|
||||||
RdmaStat
|
RdmaStat
|
||||||
RdmaEntry
|
RdmaEntry
|
||||||
|
NetworkStat
|
||||||
*/
|
*/
|
||||||
package cgroups
|
package cgroups
|
||||||
|
|
||||||
@ -52,6 +52,7 @@ type Metrics struct {
|
|||||||
Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"`
|
Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"`
|
||||||
Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"`
|
Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"`
|
||||||
Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"`
|
Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"`
|
||||||
|
Network []*NetworkStat `protobuf:"bytes,7,rep,name=network" json:"network,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metrics) Reset() { *m = Metrics{} }
|
func (m *Metrics) Reset() { *m = Metrics{} }
|
||||||
@ -209,6 +210,22 @@ func (m *RdmaEntry) Reset() { *m = RdmaEntry{} }
|
|||||||
func (*RdmaEntry) ProtoMessage() {}
|
func (*RdmaEntry) ProtoMessage() {}
|
||||||
func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} }
|
func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} }
|
||||||
|
|
||||||
|
type NetworkStat struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
|
||||||
|
RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
|
||||||
|
RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
|
||||||
|
RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
|
||||||
|
TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
|
||||||
|
TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
|
||||||
|
TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
|
||||||
|
TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NetworkStat) Reset() { *m = NetworkStat{} }
|
||||||
|
func (*NetworkStat) ProtoMessage() {}
|
||||||
|
func (*NetworkStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{12} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics")
|
proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics")
|
||||||
proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat")
|
proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat")
|
||||||
@ -222,6 +239,7 @@ func init() {
|
|||||||
proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry")
|
proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry")
|
||||||
proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat")
|
proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat")
|
||||||
proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry")
|
proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry")
|
||||||
|
proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat")
|
||||||
}
|
}
|
||||||
func (m *Metrics) Marshal() (dAtA []byte, err error) {
|
func (m *Metrics) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
@ -300,6 +318,18 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
}
|
}
|
||||||
i += n5
|
i += n5
|
||||||
}
|
}
|
||||||
|
if len(m.Network) > 0 {
|
||||||
|
for _, msg := range m.Network {
|
||||||
|
dAtA[i] = 0x3a
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
|
||||||
|
n, err := msg.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n
|
||||||
|
}
|
||||||
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -389,21 +419,21 @@ func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0xa
|
dAtA[i] = 0xa
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
|
||||||
n5, err := m.Usage.MarshalTo(dAtA[i:])
|
n6, err := m.Usage.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n5
|
i += n6
|
||||||
}
|
}
|
||||||
if m.Throttling != nil {
|
if m.Throttling != nil {
|
||||||
dAtA[i] = 0x12
|
dAtA[i] = 0x12
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size()))
|
||||||
n6, err := m.Throttling.MarshalTo(dAtA[i:])
|
n7, err := m.Throttling.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n6
|
i += n7
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
@ -439,21 +469,21 @@ func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.User))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.User))
|
||||||
}
|
}
|
||||||
if len(m.PerCPU) > 0 {
|
if len(m.PerCPU) > 0 {
|
||||||
dAtA8 := make([]byte, len(m.PerCPU)*10)
|
dAtA9 := make([]byte, len(m.PerCPU)*10)
|
||||||
var j7 int
|
var j8 int
|
||||||
for _, num := range m.PerCPU {
|
for _, num := range m.PerCPU {
|
||||||
for num >= 1<<7 {
|
for num >= 1<<7 {
|
||||||
dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80)
|
dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80)
|
||||||
num >>= 7
|
num >>= 7
|
||||||
j7++
|
j8++
|
||||||
}
|
}
|
||||||
dAtA8[j7] = uint8(num)
|
dAtA9[j8] = uint8(num)
|
||||||
j7++
|
j8++
|
||||||
}
|
}
|
||||||
dAtA[i] = 0x22
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(j7))
|
i = encodeVarintMetrics(dAtA, i, uint64(j8))
|
||||||
i += copy(dAtA[i:], dAtA8[:j7])
|
i += copy(dAtA[i:], dAtA9[:j8])
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
@ -706,11 +736,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0x2
|
dAtA[i] = 0x2
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
|
||||||
n9, err := m.Usage.MarshalTo(dAtA[i:])
|
n10, err := m.Usage.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n9
|
i += n10
|
||||||
}
|
}
|
||||||
if m.Swap != nil {
|
if m.Swap != nil {
|
||||||
dAtA[i] = 0x92
|
dAtA[i] = 0x92
|
||||||
@ -718,11 +748,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0x2
|
dAtA[i] = 0x2
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size()))
|
||||||
n10, err := m.Swap.MarshalTo(dAtA[i:])
|
n11, err := m.Swap.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n10
|
i += n11
|
||||||
}
|
}
|
||||||
if m.Kernel != nil {
|
if m.Kernel != nil {
|
||||||
dAtA[i] = 0x9a
|
dAtA[i] = 0x9a
|
||||||
@ -730,11 +760,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0x2
|
dAtA[i] = 0x2
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size()))
|
||||||
n11, err := m.Kernel.MarshalTo(dAtA[i:])
|
n12, err := m.Kernel.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n11
|
i += n12
|
||||||
}
|
}
|
||||||
if m.KernelTCP != nil {
|
if m.KernelTCP != nil {
|
||||||
dAtA[i] = 0xa2
|
dAtA[i] = 0xa2
|
||||||
@ -742,11 +772,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
dAtA[i] = 0x2
|
dAtA[i] = 0x2
|
||||||
i++
|
i++
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size()))
|
i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size()))
|
||||||
n12, err := m.KernelTCP.MarshalTo(dAtA[i:])
|
n13, err := m.KernelTCP.MarshalTo(dAtA[i:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
i += n12
|
i += n13
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
@ -766,7 +796,6 @@ func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
_ = i
|
_ = i
|
||||||
var l int
|
var l int
|
||||||
_ = l
|
_ = l
|
||||||
|
|
||||||
if m.Limit != 0 {
|
if m.Limit != 0 {
|
||||||
dAtA[i] = 0x8
|
dAtA[i] = 0x8
|
||||||
i++
|
i++
|
||||||
@ -1025,24 +1054,70 @@ func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
|
func (m *NetworkStat) Marshal() (dAtA []byte, err error) {
|
||||||
dAtA[offset] = uint8(v)
|
size := m.Size()
|
||||||
dAtA[offset+1] = uint8(v >> 8)
|
dAtA = make([]byte, size)
|
||||||
dAtA[offset+2] = uint8(v >> 16)
|
n, err := m.MarshalTo(dAtA)
|
||||||
dAtA[offset+3] = uint8(v >> 24)
|
if err != nil {
|
||||||
dAtA[offset+4] = uint8(v >> 32)
|
return nil, err
|
||||||
dAtA[offset+5] = uint8(v >> 40)
|
}
|
||||||
dAtA[offset+6] = uint8(v >> 48)
|
return dAtA[:n], nil
|
||||||
dAtA[offset+7] = uint8(v >> 56)
|
|
||||||
return offset + 8
|
|
||||||
}
|
}
|
||||||
func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
|
|
||||||
dAtA[offset] = uint8(v)
|
func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) {
|
||||||
dAtA[offset+1] = uint8(v >> 8)
|
var i int
|
||||||
dAtA[offset+2] = uint8(v >> 16)
|
_ = i
|
||||||
dAtA[offset+3] = uint8(v >> 24)
|
var l int
|
||||||
return offset + 4
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if m.RxBytes != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes))
|
||||||
|
}
|
||||||
|
if m.RxPackets != 0 {
|
||||||
|
dAtA[i] = 0x18
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets))
|
||||||
|
}
|
||||||
|
if m.RxErrors != 0 {
|
||||||
|
dAtA[i] = 0x20
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors))
|
||||||
|
}
|
||||||
|
if m.RxDropped != 0 {
|
||||||
|
dAtA[i] = 0x28
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped))
|
||||||
|
}
|
||||||
|
if m.TxBytes != 0 {
|
||||||
|
dAtA[i] = 0x30
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes))
|
||||||
|
}
|
||||||
|
if m.TxPackets != 0 {
|
||||||
|
dAtA[i] = 0x38
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets))
|
||||||
|
}
|
||||||
|
if m.TxErrors != 0 {
|
||||||
|
dAtA[i] = 0x40
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors))
|
||||||
|
}
|
||||||
|
if m.TxDropped != 0 {
|
||||||
|
dAtA[i] = 0x48
|
||||||
|
i++
|
||||||
|
i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
|
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
|
||||||
for v >= 1<<7 {
|
for v >= 1<<7 {
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
@ -1081,6 +1156,12 @@ func (m *Metrics) Size() (n int) {
|
|||||||
l = m.Rdma.Size()
|
l = m.Rdma.Size()
|
||||||
n += 1 + l + sovMetrics(uint64(l))
|
n += 1 + l + sovMetrics(uint64(l))
|
||||||
}
|
}
|
||||||
|
if len(m.Network) > 0 {
|
||||||
|
for _, e := range m.Network {
|
||||||
|
l = e.Size()
|
||||||
|
n += 1 + l + sovMetrics(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1413,6 +1494,40 @@ func (m *RdmaEntry) Size() (n int) {
|
|||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *NetworkStat) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovMetrics(uint64(l))
|
||||||
|
}
|
||||||
|
if m.RxBytes != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.RxBytes))
|
||||||
|
}
|
||||||
|
if m.RxPackets != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.RxPackets))
|
||||||
|
}
|
||||||
|
if m.RxErrors != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.RxErrors))
|
||||||
|
}
|
||||||
|
if m.RxDropped != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.RxDropped))
|
||||||
|
}
|
||||||
|
if m.TxBytes != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.TxBytes))
|
||||||
|
}
|
||||||
|
if m.TxPackets != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.TxPackets))
|
||||||
|
}
|
||||||
|
if m.TxErrors != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.TxErrors))
|
||||||
|
}
|
||||||
|
if m.TxDropped != 0 {
|
||||||
|
n += 1 + sovMetrics(uint64(m.TxDropped))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
func sovMetrics(x uint64) (n int) {
|
func sovMetrics(x uint64) (n int) {
|
||||||
for {
|
for {
|
||||||
n++
|
n++
|
||||||
@ -1437,6 +1552,7 @@ func (this *Metrics) String() string {
|
|||||||
`Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`,
|
`Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`,
|
||||||
`Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`,
|
`Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`,
|
||||||
`Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`,
|
`Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`,
|
||||||
|
`Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -1613,6 +1729,24 @@ func (this *RdmaEntry) String() string {
|
|||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
func (this *NetworkStat) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&NetworkStat{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`,
|
||||||
|
`RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`,
|
||||||
|
`RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`,
|
||||||
|
`RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`,
|
||||||
|
`TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`,
|
||||||
|
`TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`,
|
||||||
|
`TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`,
|
||||||
|
`TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
func valueToStringMetrics(v interface{}) string {
|
func valueToStringMetrics(v interface{}) string {
|
||||||
rv := reflect.ValueOf(v)
|
rv := reflect.ValueOf(v)
|
||||||
if rv.IsNil() {
|
if rv.IsNil() {
|
||||||
@ -1624,7 +1758,6 @@ func valueToStringMetrics(v interface{}) string {
|
|||||||
func (m *Metrics) Unmarshal(dAtA []byte) error {
|
func (m *Metrics) Unmarshal(dAtA []byte) error {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
|
|
||||||
for iNdEx < l {
|
for iNdEx < l {
|
||||||
preIndex := iNdEx
|
preIndex := iNdEx
|
||||||
var wire uint64
|
var wire uint64
|
||||||
@ -1847,6 +1980,37 @@ func (m *Metrics) Unmarshal(dAtA []byte) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
|
case 7:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthMetrics
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Network = append(m.Network, &NetworkStat{})
|
||||||
|
if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
default:
|
default:
|
||||||
iNdEx = preIndex
|
iNdEx = preIndex
|
||||||
skippy, err := skipMetrics(dAtA[iNdEx:])
|
skippy, err := skipMetrics(dAtA[iNdEx:])
|
||||||
@ -4092,7 +4256,237 @@ func (m *RdmaEntry) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (m *NetworkStat) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthMetrics
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType)
|
||||||
|
}
|
||||||
|
m.RxBytes = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.RxBytes |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 3:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType)
|
||||||
|
}
|
||||||
|
m.RxPackets = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.RxPackets |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 4:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType)
|
||||||
|
}
|
||||||
|
m.RxErrors = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.RxErrors |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 5:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType)
|
||||||
|
}
|
||||||
|
m.RxDropped = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.RxDropped |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 6:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType)
|
||||||
|
}
|
||||||
|
m.TxBytes = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.TxBytes |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 7:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType)
|
||||||
|
}
|
||||||
|
m.TxPackets = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.TxPackets |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 8:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType)
|
||||||
|
}
|
||||||
|
m.TxErrors = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.TxErrors |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 9:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType)
|
||||||
|
}
|
||||||
|
m.TxDropped = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowMetrics
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.TxDropped |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipMetrics(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthMetrics
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func skipMetrics(dAtA []byte) (n int, err error) {
|
func skipMetrics(dAtA []byte) (n int, err error) {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
@ -4201,88 +4595,102 @@ var (
|
|||||||
func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) }
|
func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) }
|
||||||
|
|
||||||
var fileDescriptorMetrics = []byte{
|
var fileDescriptorMetrics = []byte{
|
||||||
// 1325 bytes of a gzipped FileDescriptorProto
|
// 1549 bytes of a gzipped FileDescriptorProto
|
||||||
[regenerated gzipped descriptor byte rows omitted; the raw 0x.. literal only re-encodes the NetworkStat additions shown elsewhere in this file]
|
||||||
}
|
}
|
||||||
|
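Note: the regenerated metrics code adds a repeated NetworkStat field to Metrics. A short sketch of reading it from a sampled *cgroups.Metrics value, using only the fields visible in this diff (how the Metrics value is obtained, for example via a cgroup Stat call, is outside this hunk):

package main

import (
	"fmt"

	"github.com/containerd/cgroups"
)

// printNetwork walks the new per-interface counters carried by Metrics.Network.
func printNetwork(m *cgroups.Metrics) {
	for _, nic := range m.Network {
		fmt.Printf("%s: rx=%d bytes (%d pkts, %d dropped), tx=%d bytes (%d pkts, %d dropped)\n",
			nic.Name,
			nic.RxBytes, nic.RxPackets, nic.RxDropped,
			nic.TxBytes, nic.TxPackets, nic.TxDropped)
	}
}

func main() {
	// Placeholder value; in real use this would come from a sandbox/container stat.
	printNetwork(&cgroups.Metrics{Network: []*cgroups.NetworkStat{{Name: "eth0", RxBytes: 1024, TxBytes: 2048}}})
}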
13
vendor/github.com/containerd/cgroups/metrics.proto
generated
vendored
@ -11,6 +11,7 @@ message Metrics {
|
|||||||
MemoryStat memory = 4;
|
MemoryStat memory = 4;
|
||||||
BlkIOStat blkio = 5;
|
BlkIOStat blkio = 5;
|
||||||
RdmaStat rdma = 6;
|
RdmaStat rdma = 6;
|
||||||
|
repeated NetworkStat network = 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
message HugetlbStat {
|
message HugetlbStat {
|
||||||
@ -121,3 +122,15 @@ message RdmaEntry {
|
|||||||
uint32 hca_handles = 2;
|
uint32 hca_handles = 2;
|
||||||
uint32 hca_objects = 3;
|
uint32 hca_objects = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message NetworkStat {
|
||||||
|
string name = 1;
|
||||||
|
uint64 rx_bytes = 2;
|
||||||
|
uint64 rx_packets = 3;
|
||||||
|
uint64 rx_errors = 4;
|
||||||
|
uint64 rx_dropped = 5;
|
||||||
|
uint64 tx_bytes = 6;
|
||||||
|
uint64 tx_packets = 7;
|
||||||
|
uint64 tx_errors = 8;
|
||||||
|
uint64 tx_dropped = 9;
|
||||||
|
}
|
||||||
|
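The new `network` field surfaces per-interface traffic counters in the cgroups metrics. A minimal sketch of reading them through the regenerated Go bindings, assuming the v1 `cgroups.Load`/`Stat` API vendored here; the cgroup path is only an example:

```go
package main

import (
	"fmt"

	"github.com/containerd/cgroups"
)

func main() {
	// Load an existing v1 cgroup (the path is illustrative).
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/mycontainer"))
	if err != nil {
		panic(err)
	}
	// Stat returns the *cgroups.Metrics message defined by metrics.proto.
	metrics, err := control.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		panic(err)
	}
	// `repeated NetworkStat network = 7` becomes the Network slice in Go.
	for _, iface := range metrics.Network {
		fmt.Printf("%s: rx=%d B, tx=%d B\n", iface.Name, iface.RxBytes, iface.TxBytes)
	}
}
```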
2
vendor/github.com/containerd/cgroups/utils.go
generated
vendored
@@ -168,7 +168,7 @@ func readTasksPids(path string, subsystem Name) ([]Task, error) {
 func hugePageSizes() ([]string, error) {
 	var (
 		pageSizes []string
-		sizeList  = []string{"B", "kB", "MB", "GB", "TB", "PB"}
+		sizeList  = []string{"B", "KB", "MB", "GB", "TB", "PB"}
 	)
 	files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
 	if err != nil {
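`hugePageSizes` turns the directory names under `/sys/kernel/mm/hugepages` (e.g. `hugepages-64kB`) into labels such as `64KB`, and the hugetlb cgroup files use the uppercase form (`hugetlb.64KB.limit_in_bytes`), hence the `"kB"` → `"KB"` fix. A rough stand-alone approximation of that formatting, assuming it mirrors `docker/go-units` as the `sizeList` pattern suggests:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	sizeList := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	// A 64 KiB huge page as reported by /sys/kernel/mm/hugepages/hugepages-64kB.
	pageSize, _ := units.RAMInBytes("64kB") // 65536
	// With the old "kB" entry this printed "64kB"; now it matches the cgroup
	// file naming and prints "64KB".
	fmt.Println(units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
}
```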
2
vendor/github.com/containerd/containerd/README.md
generated
vendored
@@ -218,7 +218,7 @@ This will be the best place to discuss design and implementation.
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 
 **Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
-[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)
+[Click here for an invite to docker community slack.](https://dockr.ly/slack)
 
 ### Security audit
 
266
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
generated
vendored
@@ -9,6 +9,7 @@ import (
 	types "github.com/containerd/containerd/api/types"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types1 "github.com/gogo/protobuf/types"
 	grpc "google.golang.org/grpc"
 	io "io"
 	math "math"
@@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type ApplyRequest struct {
 	// Diff is the descriptor of the diff to be extracted
 	Diff   *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
 	Mounts []*types.Mount    `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
@@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
+	proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry")
 	proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
 	proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
 	proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
@@ -216,36 +219,40 @@ func init() {
 }
 
 var fileDescriptor_3b36a99e6faaa935 = []byte{
-	// 457 bytes of a gzipped FileDescriptorProto (bytes omitted)
+	// 526 bytes of a gzipped FileDescriptorProto (regenerated; bytes omitted)
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
 			i += n
 		}
 	}
+	if len(m.Payloads) > 0 {
+		for k, _ := range m.Payloads {
+			dAtA[i] = 0x1a
+			i++
+			v := m.Payloads[k]
+			msgSize := 0
+			if v != nil {
+				msgSize = v.Size()
+				msgSize += 1 + sovDiff(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize
+			i = encodeVarintDiff(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			if v != nil {
+				dAtA[i] = 0x12
+				i++
+				i = encodeVarintDiff(dAtA, i, uint64(v.Size()))
+				n2, err := v.MarshalTo(dAtA[i:])
+				if err != nil {
+					return 0, err
+				}
+				i += n2
+			}
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
@@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0xa
 		i++
 		i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size()))
-		n2, err := m.Applied.MarshalTo(dAtA[i:])
+		n3, err := m.Applied.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n2
+		i += n3
 	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
@@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x1a
 		i++
 		i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
-		n3, err := m.Diff.MarshalTo(dAtA[i:])
+		n4, err := m.Diff.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n3
+		i += n4
 	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
@@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) {
 			n += 1 + l + sovDiff(uint64(l))
 		}
 	}
+	if len(m.Payloads) > 0 {
+		for k, v := range m.Payloads {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = v.Size()
+				l += 1 + sovDiff(uint64(l))
+			}
+			mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l
+			n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string {
 	if this == nil {
 		return "nil"
 	}
+	keysForPayloads := make([]string, 0, len(this.Payloads))
+	for k, _ := range this.Payloads {
+		keysForPayloads = append(keysForPayloads, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads)
+	mapStringForPayloads := "map[string]*types1.Any{"
+	for _, k := range keysForPayloads {
+		mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k])
+	}
+	mapStringForPayloads += "}"
 	s := strings.Join([]string{`&ApplyRequest{`,
 		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
 		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`Payloads:` + mapStringForPayloads + `,`,
 		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
@@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Payloads == nil {
+				m.Payloads = make(map[string]*types1.Any)
+			}
+			var mapkey string
+			var mapvalue *types1.Any
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDiff
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &types1.Any{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipDiff(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Payloads[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipDiff(dAtA[iNdEx:])
3
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
generated
vendored
@@ -3,6 +3,7 @@ syntax = "proto3";
 package containerd.services.diff.v1;
 
 import weak "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 import "github.com/containerd/containerd/api/types/descriptor.proto";
 
@@ -25,6 +26,8 @@ message ApplyRequest {
 	containerd.types.Descriptor diff = 1;
 
 	repeated containerd.types.Mount mounts = 2;
+
+	map<string, google.protobuf.Any> payloads = 3;
 }
 
 message ApplyResponse {
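The new `payloads` map lets a caller attach named, typed parameters (packed as `google.protobuf.Any`) to an apply request, for example to configure a custom stream processor. A hedged sketch of populating the generated Go message; the map key and the wrapped value are purely illustrative:

```go
package main

import (
	"fmt"

	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	"github.com/containerd/typeurl"
	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	// Any registered proto message can be packed into an Any payload.
	payload, err := typeurl.MarshalAny(&gogotypes.StringValue{Value: "lz4"})
	if err != nil {
		panic(err)
	}
	req := &diffapi.ApplyRequest{
		Payloads: map[string]*gogotypes.Any{
			"io.example/option": payload, // hypothetical key understood by a differ
		},
	}
	fmt.Printf("apply request carries %d payload(s)\n", len(req.Payloads))
}
```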
284
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
generated
vendored
@@ -10,6 +10,7 @@ import (
 	rpc "github.com/gogo/googleapis/google/rpc"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types1 "github.com/gogo/protobuf/types"
 	grpc "google.golang.org/grpc"
 	io "io"
 	math "math"
@@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
 
+type ServerResponse struct {
+	UUID                 string   `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ServerResponse) Reset()      { *m = ServerResponse{} }
+func (*ServerResponse) ProtoMessage() {}
+func (*ServerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1a14fda866f10715, []int{3}
+}
+func (m *ServerResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ServerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerResponse.Merge(m, src)
+}
+func (m *ServerResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerResponse proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
 	proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
 	proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
 	proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
+	proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse")
 }
 
 func init() {
@@ -203,38 +244,42 @@ func init() {
 }
 
 var fileDescriptor_1a14fda866f10715 = []byte{
-	// 487 bytes of a gzipped FileDescriptorProto (bytes omitted)
+	// 549 bytes of a gzipped FileDescriptorProto (regenerated; bytes omitted)
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -254,6 +299,8 @@ type IntrospectionClient interface {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error)
+	// Server returns information about the containerd server
+	Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error)
 }
 
 type introspectionClient struct {
@@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o
 	return out, nil
 }
 
+func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) {
+	out := new(ServerResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // IntrospectionServer is the server API for Introspection service.
 type IntrospectionServer interface {
 	// Plugins returns a list of plugins in containerd.
@@ -280,6 +336,8 @@ type IntrospectionServer interface {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
+	// Server returns information about the containerd server
+	Server(context.Context, *types1.Empty) (*ServerResponse, error)
 }
 
 func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) {
@@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu
 	return interceptor(ctx, in, info, handler)
 }
 
+func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(types1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IntrospectionServer).Server(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.introspection.v1.Introspection/Server",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _Introspection_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "containerd.services.introspection.v1.Introspection",
 	HandlerType: (*IntrospectionServer)(nil),
@@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{
 			MethodName: "Plugins",
 			Handler:    _Introspection_Plugins_Handler,
 		},
+		{
+			MethodName: "Server",
+			Handler:    _Introspection_Server_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
@@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *ServerResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.UUID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID)))
+		i += copy(dAtA[i:], m.UUID)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
 func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) {
 	return n
 }
 
+func (m *ServerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UUID)
+	if l > 0 {
+		n += 1 + l + sovIntrospection(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
 func sovIntrospection(x uint64) (n int) {
 	for {
 		n++
@@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string {
 	}, "")
 	return s
 }
+func (this *ServerResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServerResponse{`,
+		`UUID:` + fmt.Sprintf("%v", this.UUID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringIntrospection(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *ServerResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UUID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipIntrospection(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipIntrospection(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
7
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
generated
vendored
@@ -4,6 +4,7 @@ package containerd.services.introspection.v1;
 
 import "github.com/containerd/containerd/api/types/platform.proto";
 import "google/rpc/status.proto";
+import "google/protobuf/empty.proto";
 import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
@@ -14,6 +15,8 @@ service Introspection {
 	// Clients can use this to detect features and capabilities when using
 	// containerd.
 	rpc Plugins(PluginsRequest) returns (PluginsResponse);
+	// Server returns information about the containerd server
+	rpc Server(google.protobuf.Empty) returns (ServerResponse);
 }
 
 message Plugin {
@@ -79,3 +82,7 @@ message PluginsRequest {
 message PluginsResponse {
 	repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
 }
+
+message ServerResponse {
+	string uuid = 1 [(gogoproto.customname) = "UUID"];
+}
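The new RPC gives clients a stable identity for the daemon. A hedged sketch of calling it through the containerd Go client's `Server` helper added in this same update; the socket path and namespace are the usual defaults, not requirements:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	// Server wraps Introspection.Server and returns the daemon's UUID.
	info, err := client.Server(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("containerd server UUID:", info.UUID)
}
```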
2
vendor/github.com/containerd/containerd/archive/compression/compression.go
generated
vendored
@@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
 	}
 }
 
-// CompressStream compresseses the dest with specified compression algorithm.
+// CompressStream compresses the dest with specified compression algorithm.
 func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
 	switch compression {
 	case Uncompressed:
2
vendor/github.com/containerd/containerd/archive/time_unix.go
generated
vendored
@@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
 	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
 
 	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
-		return errors.Wrap(err, "failed call to UtimesNanoAt")
+		return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
 	}
 
 	return nil
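Using `Wrapf` threads the offending path into the error, which matters when `chtimes` fails somewhere deep inside a layer tree. A small illustration with `pkg/errors`; the path is made up:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	cause := os.ErrPermission
	before := errors.Wrap(cause, "failed call to UtimesNanoAt")
	after := errors.Wrapf(cause, "failed call to UtimesNanoAt for %s", "/var/lib/containerd/layer/app.conf")
	fmt.Println(before) // failed call to UtimesNanoAt: permission denied
	fmt.Println(after)  // failed call to UtimesNanoAt for /var/lib/containerd/layer/app.conf: permission denied
}
```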
29
vendor/github.com/containerd/containerd/cio/io.go
generated
vendored
@@ -18,10 +18,13 @@ package cio
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/url"
 	"os"
+	"path/filepath"
+	"strings"
 	"sync"
 
 	"github.com/containerd/containerd/defaults"
@@ -242,17 +245,24 @@ func LogURI(uri *url.URL) Creator {
 // BinaryIO forwards container STDOUT|STDERR directly to a logging binary
 func BinaryIO(binary string, args map[string]string) Creator {
 	return func(_ string) (IO, error) {
+		binary = filepath.Clean(binary)
+		if !strings.HasPrefix(binary, "/") {
+			return nil, errors.New("absolute path needed")
+		}
 		uri := &url.URL{
 			Scheme: "binary",
-			Host:   binary,
+			Path:   binary,
 		}
+		q := uri.Query()
 		for k, v := range args {
-			uri.Query().Set(k, v)
+			q.Set(k, v)
 		}
+		uri.RawQuery = q.Encode()
+		res := uri.String()
 		return &logURI{
 			config: Config{
-				Stdout: uri.String(),
-				Stderr: uri.String(),
+				Stdout: res,
+				Stderr: res,
 			},
 		}, nil
 	}
@@ -262,14 +272,19 @@ func BinaryIO(binary string, args map[string]string) Creator {
 // If the log file already exists, the logs will be appended to the file.
 func LogFile(path string) Creator {
 	return func(_ string) (IO, error) {
+		path = filepath.Clean(path)
+		if !strings.HasPrefix(path, "/") {
+			return nil, errors.New("absolute path needed")
+		}
 		uri := &url.URL{
 			Scheme: "file",
-			Host:   path,
+			Path:   path,
 		}
+		res := uri.String()
 		return &logURI{
 			config: Config{
-				Stdout: uri.String(),
-				Stderr: uri.String(),
+				Stdout: res,
+				Stderr: res,
 			},
 		}, nil
 	}
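After this change both helpers reject relative paths and encode the target into the URL path plus an encoded query, so `Config.Stdout`/`Stderr` carry well-formed URIs. A hedged sketch; the logger binary and its argument are examples:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/cio"
)

func main() {
	// The binary must be an absolute path or the creator returns an error.
	creator := cio.BinaryIO("/usr/local/bin/my-logger", map[string]string{"id": "web"})
	result, err := creator("unused-task-id")
	if err != nil {
		panic(err)
	}
	cfg := result.Config()
	// The path now lands in the URL path rather than the host portion.
	fmt.Println(cfg.Stdout) // binary:///usr/local/bin/my-logger?id=web
}
```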
139
vendor/github.com/containerd/containerd/client.go
generated
vendored
@@ -43,6 +43,7 @@ import (
 	"github.com/containerd/containerd/content"
 	contentproxy "github.com/containerd/containerd/content/proxy"
 	"github.com/containerd/containerd/defaults"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
@@ -56,6 +57,7 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	snproxy "github.com/containerd/containerd/snapshots/proxy"
 	"github.com/containerd/typeurl"
+	"github.com/gogo/protobuf/types"
 	ptypes "github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -86,13 +88,17 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 	if copts.timeout == 0 {
 		copts.timeout = 10 * time.Second
 	}
-	rt := fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS)
-	if copts.defaultRuntime != "" {
-		rt = copts.defaultRuntime
-	}
 	c := &Client{
-		runtime: rt,
+		defaultns: copts.defaultns,
 	}
+
+	if copts.defaultRuntime != "" {
+		c.runtime = copts.defaultRuntime
+	} else {
+		c.runtime = defaults.DefaultRuntime
+	}
+
 	if copts.services != nil {
 		c.services = *copts.services
 	}
@@ -134,19 +140,15 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 		c.conn, c.connector = conn, connector
 	}
 	if copts.services == nil && c.conn == nil {
-		return nil, errors.New("no grpc connection or services is available")
+		return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
 	}
 
 	// check namespace labels for default runtime
-	if copts.defaultRuntime == "" && copts.defaultns != "" {
-		namespaces := c.NamespaceService()
-		ctx := context.Background()
-		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
-			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
-				c.runtime = defaultRuntime
-			}
-		} else {
+	if copts.defaultRuntime == "" && c.defaultns != "" {
+		if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
 			return nil, err
+		} else if label != "" {
+			c.runtime = label
 		}
 	}
 
@@ -163,20 +165,17 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
 		}
 	}
 	c := &Client{
-		conn:    conn,
-		runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
+		defaultns: copts.defaultns,
+		conn:      conn,
+		runtime:   fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
 	}
 
 	// check namespace labels for default runtime
-	if copts.defaultRuntime == "" && copts.defaultns != "" {
-		namespaces := c.NamespaceService()
-		ctx := context.Background()
-		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
-			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
-				c.runtime = defaultRuntime
-			}
-		} else {
+	if copts.defaultRuntime == "" && c.defaultns != "" {
+		if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
 			return nil, err
+		} else if label != "" {
+			c.runtime = label
 		}
 	}
 
@@ -193,13 +192,14 @@ type Client struct {
 	connMu    sync.Mutex
 	conn      *grpc.ClientConn
 	runtime   string
+	defaultns string
 	connector func() (*grpc.ClientConn, error)
 }
 
 // Reconnect re-establishes the GRPC connection to the containerd daemon
 func (c *Client) Reconnect() error {
 	if c.connector == nil {
-		return errors.New("unable to reconnect to containerd, no connector available")
+		return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
 	}
 	c.connMu.Lock()
 	defer c.connMu.Unlock()
@@ -222,7 +222,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return false, errors.New("no grpc connection available")
+		return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
 	}
 	c.connMu.Unlock()
 	r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
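Wrapping these failures around `errdefs.ErrUnavailable` lets callers branch on the error class instead of matching message strings. A hedged sketch using `errdefs.IsUnavailable`; the socket path is the usual default:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/errdefs"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	if err := client.Reconnect(); err != nil {
		// Matches the wrapped errdefs.ErrUnavailable sentinel regardless of text.
		if errdefs.IsUnavailable(err) {
			fmt.Println("containerd is unavailable:", err)
			return
		}
		panic(err)
	}
}
```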
@@ -339,7 +339,6 @@ func defaultRemoteContext() *RemoteContext {
 		Resolver: docker.NewResolver(docker.ResolverOptions{
 			Client: http.DefaultClient,
 		}),
-		Snapshotter: DefaultSnapshotter,
 	}
 }
 
@@ -354,7 +353,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
 	}
 
 	if fetchCtx.Unpack {
-		return images.Image{}, errors.New("unpack on fetch not supported, try pull")
+		return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
 	}
 
 	if fetchCtx.PlatformMatcher == nil {
@@ -407,6 +406,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
 		}
 	}
 
+	// Annotate ref with digest to push only push tag for single digest
+	if !strings.Contains(ref, "@") {
+		ref = ref + "@" + desc.Digest.String()
+	}
+
 	pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
 	if err != nil {
 		return err
@ -490,6 +494,27 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s
|
|||||||
return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels))
|
return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetLabel gets a label value from namespace store
|
||||||
|
// If there is no default label, an empty string returned with nil error
|
||||||
|
func (c *Client) GetLabel(ctx context.Context, label string) (string, error) {
|
||||||
|
ns, err := namespaces.NamespaceRequired(ctx)
|
||||||
|
if err != nil {
|
||||||
|
if c.defaultns == "" {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
ns = c.defaultns
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := c.NamespaceService()
|
||||||
|
labels, err := srv.Labels(ctx, ns)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
value := labels[label]
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Subscribe to events that match one or more of the provided filters.
|
// Subscribe to events that match one or more of the provided filters.
|
||||||
//
|
//
|
||||||
// Callers should listen on both the envelope and errs channels. If the errs
|
// Callers should listen on both the envelope and errs channels. If the errs
|
||||||
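The GetLabel helper added above is what NewClient now uses to resolve a namespace's default runtime. A minimal usage sketch (not part of the diff; the socket path and namespace name are illustrative):

    import (
        "context"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/defaults"
        "github.com/containerd/containerd/namespaces"
    )

    // defaultRuntimeFor reports the runtime label a namespace sets, if any.
    func defaultRuntimeFor(ns string) (string, error) {
        client, err := containerd.New("/run/containerd/containerd.sock", containerd.WithDefaultNamespace(ns))
        if err != nil {
            return "", err
        }
        defer client.Close()

        ctx := namespaces.WithNamespace(context.Background(), ns)
        // An empty string with a nil error means the namespace sets no override.
        return client.GetLabel(ctx, defaults.DefaultRuntimeNSLabel)
    }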
@@ -543,6 +568,10 @@ func (c *Client) ContentStore() content.Store {

 // SnapshotService returns the underlying snapshotter for the provided snapshotter name
 func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
+    snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName)
+    if err != nil {
+        snapshotterName = DefaultSnapshotter
+    }
     if c.snapshotters != nil {
         return c.snapshotters[snapshotterName]
     }
@@ -642,7 +671,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
     c.connMu.Lock()
     if c.conn == nil {
         c.connMu.Unlock()
-        return Version{}, errors.New("no grpc connection available")
+        return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
     }
     c.connMu.Unlock()
     response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
@@ -655,6 +684,58 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
     }, nil
 }

+type ServerInfo struct {
+    UUID string
+}
+
+func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
+    c.connMu.Lock()
+    if c.conn == nil {
+        c.connMu.Unlock()
+        return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
+    }
+    c.connMu.Unlock()
+
+    response, err := c.IntrospectionService().Server(ctx, &types.Empty{})
+    if err != nil {
+        return ServerInfo{}, err
+    }
+    return ServerInfo{
+        UUID: response.UUID,
+    }, nil
+}
+
+func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) {
+    if name == "" {
+        label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)
+        if err != nil {
+            return "", err
+        }
+
+        if label != "" {
+            name = label
+        } else {
+            name = DefaultSnapshotter
+        }
+    }
+
+    return name, nil
+}
+
+func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) {
+    name, err := c.resolveSnapshotterName(ctx, name)
+    if err != nil {
+        return nil, err
+    }
+
+    s := c.SnapshotService(name)
+    if s == nil {
+        return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
+    }
+
+    return s, nil
+}
+
 // CheckRuntime returns true if the current runtime matches the expected
 // runtime. Providing various parts of the runtime schema will match those
 // parts of the expected runtime
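resolveSnapshotterName and the runtime lookup above are driven by namespace labels. A hedged sketch of setting those labels through the namespaces store; the namespace name and label values are illustrative and must match runtimes and snapshotters actually installed on the daemon:

    import (
        "context"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/defaults"
    )

    func labelNamespace(ctx context.Context, client *containerd.Client) error {
        store := client.NamespaceService()
        if err := store.SetLabel(ctx, "example", defaults.DefaultRuntimeNSLabel, "io.containerd.runc.v2"); err != nil {
            return err
        }
        // With this set, resolveSnapshotterName("") and SnapshotService("") pick "native".
        return store.SetLabel(ctx, "example", defaults.DefaultSnapshotterNSLabel, "native")
    }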
3  vendor/github.com/containerd/containerd/cmd/containerd/command/main.go generated vendored
@@ -27,6 +27,7 @@ import (
     "runtime"
     "time"

+    "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/log"
     "github.com/containerd/containerd/mount"
     "github.com/containerd/containerd/services/server"
@@ -152,7 +153,7 @@ func App() *cli.App {
             ttrpcAddress = fmt.Sprintf("%s.ttrpc", config.GRPC.Address)
         )
         if address == "" {
-            return errors.New("grpc address cannot be empty")
+            return errors.Wrap(errdefs.ErrInvalidArgument, "grpc address cannot be empty")
         }
         log.G(ctx).WithFields(logrus.Fields{
             "version": version.Version,
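The errors.New to errors.Wrap(errdefs.Err...) conversions throughout this commit let callers test the error class instead of matching strings, and errdefs maps those classes onto gRPC codes at the API boundary. A small sketch of the pattern:

    import (
        "github.com/containerd/containerd/errdefs"
        "github.com/pkg/errors"
    )

    func validateAddress(address string) error {
        if address == "" {
            return errors.Wrap(errdefs.ErrInvalidArgument, "grpc address cannot be empty")
        }
        return nil
    }

    // Callers can branch on the class rather than the message:
    //
    //     errdefs.IsInvalidArgument(validateAddress("")) // true
    //     errdefs.ToGRPC(validateAddress(""))            // carries codes.InvalidArgument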
2  vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go generated vendored
@@ -93,7 +93,7 @@ func setupDumpStacks() {
     }()
 }

-func etwCallback(sourceID *guid.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) {
+func etwCallback(sourceID guid.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) {
     if state == etw.ProviderStateCaptureState {
         dumpStacks(false)
     }
2  vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go generated vendored
@@ -51,7 +51,7 @@ var publishCommand = cli.Command{
         ctx := namespaces.WithNamespace(gocontext.Background(), context.String("namespace"))
         topic := context.String("topic")
         if topic == "" {
-            return errors.New("topic required to publish event")
+            return errors.Wrap(errdefs.ErrInvalidArgument, "topic required to publish event")
         }
         payload, err := getEventPayload(os.Stdin)
         if err != nil {
29  vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go generated vendored
@@ -18,7 +18,6 @@ package command

 import (
     "bytes"
-    "errors"
     "fmt"
     "io/ioutil"
     "log"
@@ -28,7 +27,9 @@ import (
     "time"
     "unsafe"

+    "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/services/server"
+    "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "github.com/urfave/cli"
     "golang.org/x/sys/windows"
@@ -44,7 +45,9 @@ var (
     unregisterServiceFlag bool
     runServiceFlag        bool

-    setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle")
+    kernel32     = windows.NewLazySystemDLL("kernel32.dll")
+    setStdHandle = kernel32.NewProc("SetStdHandle")
+    allocConsole = kernel32.NewProc("AllocConsole")
     oldStderr    windows.Handle
     panicFile    *os.File

@@ -162,7 +165,7 @@ func (h *etwHook) Fire(e *logrus.Entry) error {
         etype = windows.EVENTLOG_INFORMATION_TYPE
         eid = eventDebug
     default:
-        return errors.New("unknown level")
+        return errors.Wrap(errdefs.ErrInvalidArgument, "unknown level")
     }

     // If there is additional data, include it as a second string.
@@ -311,7 +314,7 @@ func registerUnregisterService(root string) (bool, error) {

     if unregisterServiceFlag {
         if registerServiceFlag {
-            return true, errors.New("--register-service and --unregister-service cannot be used together")
+            return true, errors.Wrap(errdefs.ErrInvalidArgument, "--register-service and --unregister-service cannot be used together")
         }
         return true, unregisterService()
     }
@@ -321,6 +324,23 @@ func registerUnregisterService(root string) (bool, error) {
     }

     if runServiceFlag {
+        // Allocate a conhost for containerd here. We don't actually use this
+        // at all in containerd, but it will be inherited by any processes
+        // containerd executes, so they won't need to allocate their own
+        // conhosts. This is important for two reasons:
+        // - Creating a conhost slows down process launch.
+        // - We have seen reliability issues when launching many processes.
+        //   Sometimes the process invocation will fail due to an error when
+        //   creating the conhost.
+        //
+        // This needs to be done before initializing the panic file, as
+        // AllocConsole sets the stdio handles to point to the new conhost,
+        // and we want to make sure stderr goes to the panic file.
+        r, _, err := allocConsole.Call()
+        if r == 0 && err != nil {
+            return true, fmt.Errorf("error allocating conhost: %s", err)
+        }
+
         if err := initPanicFile(filepath.Join(root, "panic.log")); err != nil {
             return true, err
         }
@@ -340,7 +360,6 @@ func registerUnregisterService(root string) (bool, error) {

         logrus.AddHook(&etwHook{log})
         logrus.SetOutput(ioutil.Discard)
-
     }
     return false, nil
 }
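The conhost pre-allocation above relies on lazily resolved kernel32 procs. For reference, a standalone Windows-only sketch of the same call pattern (illustrative, mirroring the check used in the diff):

    import "golang.org/x/sys/windows"

    var allocConsoleProc = windows.NewLazySystemDLL("kernel32.dll").NewProc("AllocConsole")

    // ensureConhost allocates a console for the current process so children inherit it.
    func ensureConhost() error {
        // AllocConsole returns zero on failure; the proc call surfaces the Win32 error.
        if r, _, err := allocConsoleProc.Call(); r == 0 && err != nil {
            return err
        }
        return nil
    }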
6  vendor/github.com/containerd/containerd/container.go generated vendored
@@ -233,7 +233,11 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
         }

         // get the rootfs from the snapshotter and add it to the request
-        mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey)
+        s, err := c.client.getSnapshotter(ctx, r.Snapshotter)
+        if err != nil {
+            return nil, err
+        }
+        mounts, err := s.Mounts(ctx, r.SnapshotKey)
         if err != nil {
             return nil, err
         }
61  vendor/github.com/containerd/containerd/container_opts.go generated vendored
@@ -20,9 +20,7 @@ import (
     "context"

     "github.com/containerd/containerd/containers"
-    "github.com/containerd/containerd/defaults"
     "github.com/containerd/containerd/errdefs"
-    "github.com/containerd/containerd/namespaces"
     "github.com/containerd/containerd/oci"
     "github.com/containerd/containerd/platforms"
     "github.com/containerd/containerd/snapshots"
@@ -118,9 +116,17 @@ func WithSnapshotter(name string) NewContainerOpts {
 // WithSnapshot uses an existing root filesystem for the container
 func WithSnapshot(id string) NewContainerOpts {
     return func(ctx context.Context, client *Client, c *containers.Container) error {
-        setSnapshotterIfEmpty(ctx, client, c)
         // check that the snapshot exists, if not, fail on creation
-        if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
+        var err error
+        c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        s, err := client.getSnapshotter(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        if _, err := s.Mounts(ctx, id); err != nil {
             return err
         }
         c.SnapshotKey = id
@@ -136,9 +142,17 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
         if err != nil {
             return err
         }
-        setSnapshotterIfEmpty(ctx, client, c)
         parent := identity.ChainID(diffIDs).String()
-        if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
+        c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        s, err := client.getSnapshotter(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        if _, err := s.Prepare(ctx, id, parent, opts...); err != nil {
             return err
         }
         c.SnapshotKey = id
@@ -153,7 +167,13 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
         if c.Snapshotter == "" {
             return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
         }
-        return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
+        s, err := client.getSnapshotter(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) {
+            return err
+        }
     }
     return nil
 }
@@ -166,9 +186,17 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
         if err != nil {
             return err
         }
-        setSnapshotterIfEmpty(ctx, client, c)
         parent := identity.ChainID(diffIDs).String()
-        if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
+        c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        s, err := client.getSnapshotter(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        if _, err := s.View(ctx, id, parent, opts...); err != nil {
             return err
         }
         c.SnapshotKey = id
@@ -177,21 +205,6 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
     }
 }

-func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) {
-    if c.Snapshotter == "" {
-        defaultSnapshotter := DefaultSnapshotter
-        namespaceService := client.NamespaceService()
-        if ns, err := namespaces.NamespaceRequired(ctx); err == nil {
-            if labels, err := namespaceService.Labels(ctx, ns); err == nil {
-                if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok {
-                    defaultSnapshotter = snapshotLabel
-                }
-            }
-        }
-        c.Snapshotter = defaultSnapshotter
-    }
-}
-
 // WithContainerExtension appends extension data to the container object.
 // Use this to decorate the container object with additional data for the client
 // integration.
15  vendor/github.com/containerd/containerd/container_opts_unix.go generated vendored
@@ -50,13 +50,18 @@ func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool
             return err
         }

-        setSnapshotterIfEmpty(ctx, client, c)
-
         var (
-            snapshotter = client.SnapshotService(c.Snapshotter)
-            parent      = identity.ChainID(diffIDs).String()
-            usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
+            parent   = identity.ChainID(diffIDs).String()
+            usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
         )
+        c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
+        snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter)
+        if err != nil {
+            return err
+        }
         if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
             if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
                 c.SnapshotKey = id
22  vendor/github.com/containerd/containerd/content/helpers.go generated vendored
@@ -169,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
     return err
 }

+// CopyReader copies to a writer from a given reader, returning
+// the number of bytes copied.
+// Note: if the writer has a non-zero offset, the total number
+// of bytes read may be greater than those copied if the reader
+// is not an io.Seeker.
+// This copy does not commit the writer.
+func CopyReader(cw Writer, r io.Reader) (int64, error) {
+    ws, err := cw.Status()
+    if err != nil {
+        return 0, errors.Wrap(err, "failed to get status")
+    }
+
+    if ws.Offset > 0 {
+        r, err = seekReader(r, ws.Offset, 0)
+        if err != nil {
+            return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+        }
+    }
+
+    return copyWithBuffer(cw, r)
+}
+
 // seekReader attempts to seek the reader to the given offset, either by
 // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
 // up to the given offset.
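A hedged sketch of using the new content.CopyReader against an ingest writer; the ref name is illustrative, and passing a zero size with an empty digest on Commit skips validation:

    import (
        "context"
        "os"

        "github.com/containerd/containerd/content"
    )

    func ingestFile(ctx context.Context, cs content.Store, path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()

        w, err := cs.Writer(ctx, content.WithRef("example-ingest"))
        if err != nil {
            return err
        }
        defer w.Close()

        // CopyReader resumes from the writer's current offset when the ref already
        // holds partial data, which is why it takes a plain reader rather than a ReaderAt.
        if _, err := content.CopyReader(w, f); err != nil {
            return err
        }
        return w.Commit(ctx, 0, "")
    }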
2  vendor/github.com/containerd/containerd/defaults/defaults_unix.go generated vendored
@@ -32,4 +32,6 @@ const (
     // DefaultFIFODir is the default location used by client-side cio library
     // to store FIFOs.
     DefaultFIFODir = "/run/containerd/fifo"
+    // DefaultRuntime is the default linux runtime
+    DefaultRuntime = "io.containerd.runc.v2"
 )
2  vendor/github.com/containerd/containerd/defaults/defaults_windows.go generated vendored
@@ -40,4 +40,6 @@ const (
     // DefaultFIFODir is the default location used by client-side cio library
     // to store FIFOs. Unused on Windows.
     DefaultFIFODir = ""
+    // DefaultRuntime is the default windows runtime
+    DefaultRuntime = "io.containerd.runhcs.v1"
 )
7  vendor/github.com/containerd/containerd/diff.go generated vendored
@@ -48,13 +48,14 @@ type diffRemote struct {
 func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) {
     var config diff.ApplyConfig
     for _, opt := range opts {
-        if err := opt(&config); err != nil {
+        if err := opt(ctx, desc, &config); err != nil {
             return ocispec.Descriptor{}, err
         }
     }
     req := &diffapi.ApplyRequest{
         Diff:   fromDescriptor(desc),
         Mounts: fromMounts(mounts),
+        Payloads: config.ProcessorPayloads,
     }
     resp, err := r.client.Apply(ctx, req)
     if err != nil {
86  vendor/github.com/containerd/containerd/diff/apply/apply.go generated vendored
@@ -23,11 +23,8 @@ import (
     "time"

     "github.com/containerd/containerd/archive"
-    "github.com/containerd/containerd/archive/compression"
     "github.com/containerd/containerd/content"
     "github.com/containerd/containerd/diff"
-    "github.com/containerd/containerd/errdefs"
-    "github.com/containerd/containerd/images"
     "github.com/containerd/containerd/log"
     "github.com/containerd/containerd/mount"
     digest "github.com/opencontainers/go-digest"
@@ -66,54 +63,63 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [
         }
     }()

-    isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType)
-    if err != nil {
-        return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType)
+    var config diff.ApplyConfig
+    for _, o := range opts {
+        if err := o(ctx, desc, &config); err != nil {
+            return emptyDesc, errors.Wrap(err, "failed to apply config opt")
+        }
     }

-    var ocidesc ocispec.Descriptor
+    ra, err := s.store.ReaderAt(ctx, desc)
+    if err != nil {
+        return emptyDesc, errors.Wrap(err, "failed to get reader from content store")
+    }
+    defer ra.Close()
+
+    var processors []diff.StreamProcessor
+    processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra))
+    processors = append(processors, processor)
+    for {
+        if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil {
+            return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType)
+        }
+        processors = append(processors, processor)
+        if processor.MediaType() == ocispec.MediaTypeImageLayer {
+            break
+        }
+    }
+    defer processor.Close()
+
+    digester := digest.Canonical.Digester()
+    rc := &readCounter{
+        r: io.TeeReader(processor, digester.Hash()),
+    }
     if err := mount.WithTempMount(ctx, mounts, func(root string) error {
-        ra, err := s.store.ReaderAt(ctx, desc)
-        if err != nil {
-            return errors.Wrap(err, "failed to get reader from content store")
-        }
-        defer ra.Close()
-
-        r := content.NewReader(ra)
-        if isCompressed {
-            ds, err := compression.DecompressStream(r)
-            if err != nil {
-                return err
-            }
-            defer ds.Close()
-            r = ds
-        }
-
-        digester := digest.Canonical.Digester()
-        rc := &readCounter{
-            r: io.TeeReader(r, digester.Hash()),
-        }
-
         if _, err := archive.Apply(ctx, root, rc); err != nil {
             return err
         }

         // Read any trailing data
-        if _, err := io.Copy(ioutil.Discard, rc); err != nil {
+        _, err := io.Copy(ioutil.Discard, rc)
         return err
-        }
-
-        ocidesc = ocispec.Descriptor{
-            MediaType: ocispec.MediaTypeImageLayer,
-            Size:      rc.c,
-            Digest:    digester.Digest(),
-        }
-        return nil
-
     }); err != nil {
         return emptyDesc, err
     }
-    return ocidesc, nil
+
+    for _, p := range processors {
+        if ep, ok := p.(interface {
+            Err() error
+        }); ok {
+            if err := ep.Err(); err != nil {
+                return emptyDesc, err
+            }
+        }
+    }
+    return ocispec.Descriptor{
+        MediaType: ocispec.MediaTypeImageLayer,
+        Size:      rc.c,
+        Digest:    digester.Digest(),
+    }, nil
 }

 type readCounter struct {
13  vendor/github.com/containerd/containerd/diff/diff.go generated vendored
@@ -20,6 +20,7 @@ import (
     "context"

     "github.com/containerd/containerd/mount"
+    "github.com/gogo/protobuf/types"
     ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -53,10 +54,12 @@ type Comparer interface {

 // ApplyConfig is used to hold parameters needed for a apply operation
 type ApplyConfig struct {
+    // ProcessorPayloads specifies the payload sent to various processors
+    ProcessorPayloads map[string]*types.Any
 }

 // ApplyOpt is used to configure an Apply operation
-type ApplyOpt func(*ApplyConfig) error
+type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error

 // Applier allows applying diffs between mounts
 type Applier interface {
@@ -94,3 +97,11 @@ func WithLabels(labels map[string]string) Opt {
         return nil
     }
 }
+
+// WithPayloads sets the apply processor payloads to the config
+func WithPayloads(payloads map[string]*types.Any) ApplyOpt {
+    return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error {
+        c.ProcessorPayloads = payloads
+        return nil
+    }
+}
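The widened ApplyOpt signature plus ProcessorPayloads let a caller hand opaque, per-processor configuration through the diff service. A hedged sketch; the processor ID and payload bytes are hypothetical and only meaningful to a matching stream processor configured on the daemon:

    import (
        "context"

        "github.com/containerd/containerd/diff"
        "github.com/containerd/containerd/mount"
        "github.com/gogo/protobuf/types"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func applyWithPayload(ctx context.Context, applier diff.Applier, desc ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
        payloads := map[string]*types.Any{
            // hypothetical processor ID; the key selects which processor receives the payload
            "io.containerd.processor.v1.example": {
                TypeUrl: "example.org/config",
                Value:   []byte(`{"level":3}`),
            },
        }
        return applier.Apply(ctx, desc, mounts, diff.WithPayloads(payloads))
    }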
187  vendor/github.com/containerd/containerd/diff/stream.go generated vendored Normal file
@@ -0,0 +1,187 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+    "context"
+    "io"
+    "os"
+
+    "github.com/containerd/containerd/archive/compression"
+    "github.com/containerd/containerd/images"
+    "github.com/gogo/protobuf/types"
+    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+    "github.com/pkg/errors"
+)
+
+var (
+    handlers []Handler
+
+    // ErrNoProcessor is returned when no stream processor is available for a media-type
+    ErrNoProcessor = errors.New("no processor for media-type")
+)
+
+func init() {
+    // register the default compression handler
+    RegisterProcessor(compressedHandler)
+}
+
+// RegisterProcessor registers a stream processor for media-types
+func RegisterProcessor(handler Handler) {
+    handlers = append(handlers, handler)
+}
+
+// GetProcessor returns the processor for a media-type
+func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+    // reverse this list so that user configured handlers come up first
+    for i := len(handlers) - 1; i >= 0; i-- {
+        processor, ok := handlers[i](ctx, stream.MediaType())
+        if ok {
+            return processor(ctx, stream, payloads)
+        }
+    }
+    return nil, ErrNoProcessor
+}
+
+// Handler checks a media-type and initializes the processor
+type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool)
+
+// StaticHandler returns the processor init func for a static media-type
+func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler {
+    return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
+        if mediaType == expectedMediaType {
+            return fn, true
+        }
+        return nil, false
+    }
+}
+
+// StreamProcessorInit returns the initialized stream processor
+type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error)
+
+// RawProcessor provides access to direct fd for processing
+type RawProcessor interface {
+    // File returns the fd for the read stream of the underlying processor
+    File() *os.File
+}
+
+// StreamProcessor handles processing a content stream and transforming it into a different media-type
+type StreamProcessor interface {
+    io.ReadCloser
+
+    // MediaType is the resulting media-type that the processor processes the stream into
+    MediaType() string
+}
+
+func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
+    compressed, err := images.IsCompressedDiff(ctx, mediaType)
+    if err != nil {
+        return nil, false
+    }
+    if compressed {
+        return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+            ds, err := compression.DecompressStream(stream)
+            if err != nil {
+                return nil, err
+            }
+
+            return &compressedProcessor{
+                rc: ds,
+            }, nil
+        }, true
+    }
+    return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+        return &stdProcessor{
+            rc: stream,
+        }, nil
+    }, true
+}
+
+// NewProcessorChain initialized the root StreamProcessor
+func NewProcessorChain(mt string, r io.Reader) StreamProcessor {
+    return &processorChain{
+        mt: mt,
+        rc: r,
+    }
+}
+
+type processorChain struct {
+    mt string
+    rc io.Reader
+}
+
+func (c *processorChain) MediaType() string {
+    return c.mt
+}
+
+func (c *processorChain) Read(p []byte) (int, error) {
+    return c.rc.Read(p)
+}
+
+func (c *processorChain) Close() error {
+    return nil
+}
+
+type stdProcessor struct {
+    rc StreamProcessor
+}
+
+func (c *stdProcessor) MediaType() string {
+    return ocispec.MediaTypeImageLayer
+}
+
+func (c *stdProcessor) Read(p []byte) (int, error) {
+    return c.rc.Read(p)
+}
+
+func (c *stdProcessor) Close() error {
+    return nil
+}
+
+type compressedProcessor struct {
+    rc io.ReadCloser
+}
+
+func (c *compressedProcessor) MediaType() string {
+    return ocispec.MediaTypeImageLayer
+}
+
+func (c *compressedProcessor) Read(p []byte) (int, error) {
+    return c.rc.Read(p)
+}
+
+func (c *compressedProcessor) Close() error {
+    return c.rc.Close()
+}
+
+func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler {
+    set := make(map[string]struct{}, len(mediaTypes))
+    for _, m := range mediaTypes {
+        set[m] = struct{}{}
+    }
+    return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) {
+        if _, ok := set[mediaType]; ok {
+            return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
+                payload := payloads[id]
+                return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload)
+            }, true
+        }
+        return nil, false
+    }
+}
+
+const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE"
146  vendor/github.com/containerd/containerd/diff/stream_unix.go generated vendored Normal file
@@ -0,0 +1,146 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "os"
+    "os/exec"
+    "sync"
+
+    "github.com/gogo/protobuf/proto"
+    "github.com/gogo/protobuf/types"
+    "github.com/pkg/errors"
+)
+
+// NewBinaryProcessor returns a binary processor for use with processing content streams
+func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
+    cmd := exec.CommandContext(ctx, name, args...)
+    cmd.Env = os.Environ()
+
+    var payloadC io.Closer
+    if payload != nil {
+        data, err := proto.Marshal(payload)
+        if err != nil {
+            return nil, err
+        }
+        r, w, err := os.Pipe()
+        if err != nil {
+            return nil, err
+        }
+        go func() {
+            io.Copy(w, bytes.NewReader(data))
+            w.Close()
+        }()
+
+        cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+        payloadC = r
+    }
+    cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
+    var (
+        stdin  io.Reader
+        closer func() error
+        err    error
+    )
+    if f, ok := stream.(RawProcessor); ok {
+        stdin = f.File()
+        closer = f.File().Close
+    } else {
+        stdin = stream
+    }
+    cmd.Stdin = stdin
+    r, w, err := os.Pipe()
+    if err != nil {
+        return nil, err
+    }
+    cmd.Stdout = w
+
+    stderr := bytes.NewBuffer(nil)
+    cmd.Stderr = stderr
+
+    if err := cmd.Start(); err != nil {
+        return nil, err
+    }
+    p := &binaryProcessor{
+        cmd:    cmd,
+        r:      r,
+        mt:     rmt,
+        stderr: stderr,
+    }
+    go p.wait()
+
+    // close after start and dup
+    w.Close()
+    if closer != nil {
+        closer()
+    }
+    if payloadC != nil {
+        payloadC.Close()
+    }
+    return p, nil
+}
+
+type binaryProcessor struct {
+    cmd    *exec.Cmd
+    r      *os.File
+    mt     string
+    stderr *bytes.Buffer
+
+    mu  sync.Mutex
+    err error
+}
+
+func (c *binaryProcessor) Err() error {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    return c.err
+}
+
+func (c *binaryProcessor) wait() {
+    if err := c.cmd.Wait(); err != nil {
+        if _, ok := err.(*exec.ExitError); ok {
+            c.mu.Lock()
+            c.err = errors.New(c.stderr.String())
+            c.mu.Unlock()
+        }
+    }
+}
+
+func (c *binaryProcessor) File() *os.File {
+    return c.r
+}
+
+func (c *binaryProcessor) MediaType() string {
+    return c.mt
+}
+
+func (c *binaryProcessor) Read(p []byte) (int, error) {
+    return c.r.Read(p)
+}
+
+func (c *binaryProcessor) Close() error {
+    err := c.r.Close()
+    if kerr := c.cmd.Process.Kill(); err == nil {
+        err = kerr
+    }
+    return err
+}
165  vendor/github.com/containerd/containerd/diff/stream_windows.go generated vendored Normal file
@@ -0,0 +1,165 @@
+// +build windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "sync"
+
+    winio "github.com/Microsoft/go-winio"
+    "github.com/gogo/protobuf/proto"
+    "github.com/gogo/protobuf/types"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+const processorPipe = "STREAM_PROCESSOR_PIPE"
+
+// NewBinaryProcessor returns a binary processor for use with processing content streams
+func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
+    cmd := exec.CommandContext(ctx, name, args...)
+    cmd.Env = os.Environ()
+
+    if payload != nil {
+        data, err := proto.Marshal(payload)
+        if err != nil {
+            return nil, err
+        }
+        up, err := getUiqPath()
+        if err != nil {
+            return nil, err
+        }
+        path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up)
+        l, err := winio.ListenPipe(path, nil)
+        if err != nil {
+            return nil, err
+        }
+        go func() {
+            defer l.Close()
+            conn, err := l.Accept()
+            if err != nil {
+                logrus.WithError(err).Error("accept npipe connection")
+                return
+            }
+            io.Copy(conn, bytes.NewReader(data))
+            conn.Close()
+        }()
+        cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path))
+    }
+    cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
+    var (
+        stdin  io.Reader
+        closer func() error
+        err    error
+    )
+    if f, ok := stream.(RawProcessor); ok {
+        stdin = f.File()
+        closer = f.File().Close
+    } else {
+        stdin = stream
+    }
+    cmd.Stdin = stdin
+    r, w, err := os.Pipe()
+    if err != nil {
+        return nil, err
+    }
+    cmd.Stdout = w
+    stderr := bytes.NewBuffer(nil)
+    cmd.Stderr = stderr
+
+    if err := cmd.Start(); err != nil {
+        return nil, err
+    }
+    p := &binaryProcessor{
+        cmd:    cmd,
+        r:      r,
+        mt:     rmt,
+        stderr: stderr,
+    }
+    go p.wait()
+
+    // close after start and dup
+    w.Close()
+    if closer != nil {
+        closer()
+    }
+    return p, nil
+}
+
+type binaryProcessor struct {
+    cmd    *exec.Cmd
+    r      *os.File
+    mt     string
+    stderr *bytes.Buffer
+
+    mu  sync.Mutex
+    err error
+}
+
+func (c *binaryProcessor) Err() error {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    return c.err
+}
+
+func (c *binaryProcessor) wait() {
+    if err := c.cmd.Wait(); err != nil {
+        if _, ok := err.(*exec.ExitError); ok {
+            c.mu.Lock()
+            c.err = errors.New(c.stderr.String())
+            c.mu.Unlock()
+        }
+    }
+}
+
+func (c *binaryProcessor) File() *os.File {
+    return c.r
+}
+
+func (c *binaryProcessor) MediaType() string {
+    return c.mt
+}
+
+func (c *binaryProcessor) Read(p []byte) (int, error) {
+    return c.r.Read(p)
+}
+
+func (c *binaryProcessor) Close() error {
+    err := c.r.Close()
+    if kerr := c.cmd.Process.Kill(); err == nil {
+        err = kerr
+    }
+    return err
+}
+
+func getUiqPath() (string, error) {
+    dir, err := ioutil.TempDir("", "")
+    if err != nil {
+        return "", err
+    }
+    os.Remove(dir)
+    return filepath.Base(dir), nil
+}
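The three stream files above resolve a chain of processors by media-type until an uncompressed OCI layer comes out, and BinaryHandler is the hook for plugging in an external binary. A hedged sketch of registering one; the ID, custom media type, and binary path are hypothetical:

    import (
        "github.com/containerd/containerd/diff"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func init() {
        // Layers with the custom media type are piped through the external binary,
        // which is expected to write an uncompressed tar layer to stdout.
        diff.RegisterProcessor(diff.BinaryHandler(
            "io.containerd.processor.v1.zstd",             // hypothetical processor ID (also the payload key)
            ocispec.MediaTypeImageLayer,                   // media type the binary produces
            []string{"application/vnd.example.layer+zst"}, // media types it accepts
            "/usr/local/bin/zstd-decompress",              // hypothetical helper binary
            nil,
        ))
    }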
4  vendor/github.com/containerd/containerd/diff/walking/differ.go generated vendored
@@ -150,7 +150,9 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o
                 if err != nil {
                     return errors.Wrap(err, "failed to get info from content store")
                 }
+                if info.Labels == nil {
+                    info.Labels = make(map[string]string)
+                }
                 // Set uncompressed label if digest already existed without label
                 if _, ok := info.Labels[uncompressed]; !ok {
                     info.Labels[uncompressed] = config.Labels[uncompressed]
7  vendor/github.com/containerd/containerd/gc/gc.go generated vendored
@@ -30,6 +30,11 @@ import (
 // ResourceType represents type of resource at a node
 type ResourceType uint8

+// ResourceMax represents the max resource.
+// Upper bits are stripped out during the mark phase, allowing the upper 3 bits
+// to be used by the caller reference function.
+const ResourceMax = ResourceType(0x1F)
+
 // Node presents a resource which has a type and key,
 // this node can be used to lookup other nodes.
 type Node struct {
@@ -80,6 +85,8 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc
             }
         }

+        // strip bits above max resource type
+        id.Type = id.Type & ResourceMax
         // mark as black when done
         reachable[id] = struct{}{}
     }
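ResourceMax reserves the upper three bits of a ResourceType for caller-defined flags; because Tricolor masks them off before marking, a flagged node and its plain counterpart collapse to the same reachable resource. A tiny illustrative sketch:

    import "github.com/containerd/containerd/gc"

    const (
        resourceContent = gc.ResourceType(1)      // illustrative resource type
        flagCached      = gc.ResourceType(1 << 7) // caller-defined flag living in the upper bits
    )

    // normalize mirrors what the mark phase does before recording a node as reachable.
    func normalize(n gc.Node) gc.Node {
        n.Type = n.Type & gc.ResourceMax
        return n
    }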
42  vendor/github.com/containerd/containerd/image.go generated vendored
@@ -21,11 +21,13 @@ import (
     "fmt"

     "github.com/containerd/containerd/content"
+    "github.com/containerd/containerd/diff"
     "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/images"
     "github.com/containerd/containerd/platforms"
     "github.com/containerd/containerd/rootfs"
-    digest "github.com/opencontainers/go-digest"
+    "github.com/containerd/containerd/snapshots"
+    "github.com/opencontainers/go-digest"
     "github.com/opencontainers/image-spec/identity"
     ocispec "github.com/opencontainers/image-spec/specs-go/v1"
     "github.com/pkg/errors"
@@ -40,7 +42,7 @@ type Image interface {
     // Labels of the image
     Labels() map[string]string
     // Unpack unpacks the image's content into a snapshot
-    Unpack(context.Context, string) error
+    Unpack(context.Context, string, ...UnpackOpt) error
     // RootFS returns the unpacked diffids that make up images rootfs.
     RootFS(ctx context.Context) ([]digest.Digest, error)
     // Size returns the total size of the image's packed resources.
@@ -108,7 +110,10 @@ func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
 }

 func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) {
-    sn := i.client.SnapshotService(snapshotterName)
+    sn, err := i.client.getSnapshotter(ctx, snapshotterName)
+    if err != nil {
+        return false, err
+    }
     cs := i.client.ContentStore()

     diffs, err := i.i.RootFS(ctx, cs, i.platform)
@@ -127,28 +132,53 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e
     return false, nil
 }

-func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
+// UnpackConfig provides configuration for the unpack of an image
+type UnpackConfig struct {
+    // ApplyOpts for applying a diff to a snapshotter
+    ApplyOpts []diff.ApplyOpt
+    // SnapshotOpts for configuring a snapshotter
+    SnapshotOpts []snapshots.Opt
+}
+
+// UnpackOpt provides configuration for unpack
+type UnpackOpt func(context.Context, *UnpackConfig) error
+
+func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error {
     ctx, done, err := i.client.WithLease(ctx)
     if err != nil {
         return err
     }
     defer done(ctx)

+    var config UnpackConfig
+    for _, o := range opts {
+        if err := o(ctx, &config); err != nil {
+            return err
+        }
+    }
+
     layers, err := i.getLayers(ctx, i.platform)
     if err != nil {
         return err
     }

     var (
-        sn = i.client.SnapshotService(snapshotterName)
         a  = i.client.DiffService()
         cs = i.client.ContentStore()

         chain    []digest.Digest
         unpacked bool
     )
+    snapshotterName, err = i.client.resolveSnapshotterName(ctx, snapshotterName)
+    if err != nil {
+        return err
+    }
+    sn, err := i.client.getSnapshotter(ctx, snapshotterName)
+    if err != nil {
+        return err
+    }
     for _, layer := range layers {
-        unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a)
+        unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts)
         if err != nil {
             return err
         }
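A hedged sketch of the extended Unpack signature; the snapshot label is illustrative, and passing an empty snapshotter name defers to the namespace default resolved above:

    import (
        "context"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/snapshots"
    )

    func unpackWithLabel(ctx context.Context, img containerd.Image) error {
        withLabel := func(_ context.Context, c *containerd.UnpackConfig) error {
            c.SnapshotOpts = append(c.SnapshotOpts, snapshots.WithLabels(map[string]string{
                "example.org/owner": "demo", // illustrative snapshot label
            }))
            return nil
        }
        return img.Unpack(ctx, "", withLabel)
    }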
32  vendor/github.com/containerd/containerd/images/archive/exporter.go (generated, vendored)
@ -89,31 +89,29 @@ func WithImage(is images.Store, name string) ExportOpt {
 }

 // WithManifest adds a manifest to the exported archive.
-// It is up to caller to put name annotation to on the manifest
-// descriptor if needed.
-func WithManifest(manifest ocispec.Descriptor) ExportOpt {
+// When names are given they will be set on the manifest in the
+// exported archive, creating an index record for each name.
+// When no names are provided, it is up to caller to put name annotation to
+// on the manifest descriptor if needed.
+func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
 	return func(ctx context.Context, o *exportOptions) error {
-		o.manifests = append(o.manifests, manifest)
-		return nil
-	}
-}
-
-// WithNamedManifest adds a manifest to the exported archive
-// with the provided names.
-func WithNamedManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
-	return func(ctx context.Context, o *exportOptions) error {
-		for _, name := range names {
-			manifest.Annotations = addNameAnnotation(name, manifest.Annotations)
+		if len(names) == 0 {
 			o.manifests = append(o.manifests, manifest)
 		}
+		for _, name := range names {
+			mc := manifest
+			mc.Annotations = addNameAnnotation(name, manifest.Annotations)
+			o.manifests = append(o.manifests, mc)
+		}

 		return nil
 	}
 }

-func addNameAnnotation(name string, annotations map[string]string) map[string]string {
-	if annotations == nil {
-		annotations = map[string]string{}
+func addNameAnnotation(name string, base map[string]string) map[string]string {
+	annotations := map[string]string{}
+	for k, v := range base {
+		annotations[k] = v
 	}

 	annotations[images.AnnotationImageName] = name
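A short sketch of the new variadic names parameter from a caller's side; the ExportOpt slice wiring follows this vendored tree, while the descriptor itself is assumed to come from the caller:

package example

import (
	"github.com/containerd/containerd/images/archive"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// exportOpts builds the export options for one manifest. With names supplied,
// WithManifest now writes an index record per name; with none, the caller is
// still responsible for any name annotation, as before.
func exportOpts(manifest ocispec.Descriptor, names ...string) []archive.ExportOpt {
	return []archive.ExportOpt{
		archive.WithManifest(manifest, names...),
	}
}

Passing both docker.io/library/alpine:latest and docker.io/library/alpine:3.10, for example, records two index entries pointing at the same manifest digest.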
149  vendor/github.com/containerd/containerd/images/archive/importer.go (generated, vendored)
@ -22,12 +22,14 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"path"

 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/log"
 	digest "github.com/opencontainers/go-digest"
@ -36,6 +38,22 @@ import (
 	"github.com/pkg/errors"
 )

+type importOpts struct {
+	compress bool
+}
+
+// ImportOpt is an option for importing an OCI index
+type ImportOpt func(*importOpts) error
+
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(io *importOpts) error {
+		io.compress = true
+		return nil
+	}
+}
+
 // ImportIndex imports an index from a tar archive image bundle
 // - implements Docker v1.1, v1.2 and OCI v1.
 // - prefers OCI v1 when provided
@ -43,8 +61,7 @@ import (
 // - normalizes Docker references and adds as OCI ref name
 //      e.g. alpine:latest -> docker.io/library/alpine:latest
 // - existing OCI reference names are untouched
-// - TODO: support option to compress layers on ingest
-func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
+func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
 	var (
 		tr = tar.NewReader(reader)

@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		}
 		symlinks = make(map[string]string)
 		blobs    = make(map[string]ocispec.Descriptor)
+		iopts    importOpts
 	)

+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
+
 	for {
 		hdr, err := tr.Next()
 		if err == io.EOF {
@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		if !ok {
 			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
 		}
-		config.MediaType = ocispec.MediaTypeImageConfig
+		config.MediaType = images.MediaTypeDockerSchema2Config

-		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs)
+		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
 		if err != nil {
 			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
 		}

-		manifest := ocispec.Manifest{
-			Versioned: specs.Versioned{
-				SchemaVersion: 2,
-			},
-			Config: config,
-			Layers: layers,
+		manifest := struct {
+			SchemaVersion int                  `json:"schemaVersion"`
+			MediaType     string               `json:"mediaType"`
+			Config        ocispec.Descriptor   `json:"config"`
+			Layers        []ocispec.Descriptor `json:"layers"`
+		}{
+			SchemaVersion: 2,
+			MediaType:     images.MediaTypeDockerSchema2Manifest,
+			Config:        config,
+			Layers:        layers,
 		}

 		desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest)
@ -211,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size
 	return dgstr.Digest(), nil
 }

-func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-	var layers []ocispec.Descriptor
-	for _, f := range layerFiles {
+func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
+	layers := make([]ocispec.Descriptor, len(layerFiles))
+	descs := map[digest.Digest]*ocispec.Descriptor{}
+	filters := []string{}
+	for i, f := range layerFiles {
 		desc, ok := blobs[f]
 		if !ok {
 			return nil, errors.Errorf("layer %q not found", f)
 		}
+		layers[i] = desc
+		descs[desc.Digest] = &layers[i]
+		filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String())
+	}
+
+	err := store.Walk(ctx, func(info content.Info) error {
+		dgst, ok := info.Labels["containerd.io/uncompressed"]
+		if ok {
+			desc := descs[digest.Digest(dgst)]
+			if desc != nil {
+				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
+				desc.Digest = info.Digest
+				desc.Size = info.Size
+			}
+		}
+		return nil
+	}, filters...)
+	if err != nil {
+		return nil, errors.Wrap(err, "failure checking for compressed blobs")
+	}
+
+	for i, desc := range layers {
+		if desc.MediaType != "" {
+			continue
+		}
 		// Open blob, resolve media type
 		ra, err := store.ReaderAt(ctx, desc)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest)
+			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
 		}
 		s, err := compression.DecompressStream(content.NewReader(ra))
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to detect compression for %q", f)
+			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
 		}
 		if s.GetCompression() == compression.Uncompressed {
-			// TODO: Support compressing and writing back to content store
-			desc.MediaType = ocispec.MediaTypeImageLayer
+			if compress {
+				ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
+				labels := map[string]string{
+					"containerd.io/uncompressed": desc.Digest.String(),
+				}
+				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
+				if err != nil {
+					s.Close()
+					return nil, err
+				}
+				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
+			} else {
+				layers[i].MediaType = images.MediaTypeDockerSchema2Layer
+			}
 		} else {
-			desc.MediaType = ocispec.MediaTypeImageLayerGzip
+			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
 		}
 		s.Close()

-		layers = append(layers, desc)
 	}
 	return layers, nil
 }

+func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
+	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
+	}
+
+	defer func() {
+		w.Close()
+		if err != nil {
+			cs.Abort(ctx, ref)
+		}
+	}()
+	if err := w.Truncate(0); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
+	}
+
+	cw, err := compression.CompressStream(w, compression.Gzip)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if _, err := io.Copy(cw, r); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	if err := cw.Close(); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	cst, err := w.Status()
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
+	}
+
+	desc.Digest = w.Digest()
+	desc.Size = cst.Offset
+
+	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
+		}
+	}
+
+	return desc, nil
+}
+
 func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
 	manifestBytes, err := json.Marshal(manifest)
 	if err != nil {
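A sketch of driving the new archive-level option directly against a content store; the socket path, namespace and tar file are assumptions for illustration:

package main

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock") // assumed socket path
	if err != nil {
		panic(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "default")

	f, err := os.Open("rootfs.tar") // hypothetical Docker-style tar without layer media types
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// With WithImportCompression, uncompressed layers found in the archive are
	// re-compressed via compressBlob and recorded with the gzip media type.
	idx, err := archive.ImportIndex(ctx, client.ContentStore(), f, archive.WithImportCompression())
	if err != nil {
		panic(err)
	}
	_ = idx // an ocispec.Descriptor for the imported index
}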
2  vendor/github.com/containerd/containerd/images/archive/reference.go (generated, vendored)
@ -91,7 +91,7 @@ func familiarizeReference(ref string) (string, error) {
 func ociReferenceName(name string) string {
 	// OCI defines the reference name as only a tag excluding the
 	// repository. The containerd annotation contains the full image name
-	// since the tag is insufficent for correctly naming and referring to an
+	// since the tag is insufficient for correctly naming and referring to an
 	// image
 	var ociRef string
 	if spec, err := reference.Parse(name); err == nil {
77  vendor/github.com/containerd/containerd/images/image.go (generated, vendored)
@ -142,6 +142,7 @@ type platformManifest struct {
 // this direction because this abstraction is not needed.`
 func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
 	var (
+		limit    = 1
 		m        []platformManifest
 		wasIndex bool
 	)
@ -210,10 +211,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 		}

+		sort.SliceStable(descs, func(i, j int) bool {
+			if descs[i].Platform == nil {
+				return false
+			}
+			if descs[j].Platform == nil {
+				return true
+			}
+			return platform.Less(*descs[i].Platform, *descs[j].Platform)
+		})
+
 		wasIndex = true

+		if len(descs) > limit {
+			return descs[:limit], nil
+		}
 		return descs, nil
-
 	}
 	return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
 }), image); err != nil {
@ -227,17 +240,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 		}
 		return ocispec.Manifest{}, err
 	}
-
-	sort.SliceStable(m, func(i, j int) bool {
-		if m[i].p == nil {
-			return false
-		}
-		if m[j].p == nil {
-			return true
-		}
-		return platform.Less(*m[i].p, *m[j].p)
-	})
-
 	return *m[0].m, nil
 }

@ -357,6 +359,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		descs = append(descs, index.Manifests...)
 	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
+		MediaTypeDockerSchema2LayerEnc, MediaTypeDockerSchema2LayerGzipEnc,
 		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
 		MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
 		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
@ -406,3 +409,53 @@ func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) {
 	}
 	return false, nil
 }
+
+// GetImageLayerDescriptors gets the image layer Descriptors of an image; the array contains
+// a list of Descriptors belonging to one platform followed by lists of other platforms
+func GetImageLayerDescriptors(ctx context.Context, cs content.Store, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	var lis []ocispec.Descriptor
+
+	ds := platforms.DefaultSpec()
+	platform := &ds
+
+	switch desc.MediaType {
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
+		MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		children, err := Children(ctx, cs, desc)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				return []ocispec.Descriptor{}, nil
+			}
+			return []ocispec.Descriptor{}, err
+		}
+
+		if desc.Platform != nil {
+			platform = desc.Platform
+		}
+
+		for _, child := range children {
+			var tmp []ocispec.Descriptor
+
+			switch child.MediaType {
+			case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2Layer,
+				ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer,
+				MediaTypeDockerSchema2LayerGzipEnc, MediaTypeDockerSchema2LayerEnc:
+				tdesc := child
+				tdesc.Platform = platform
+				tmp = append(tmp, tdesc)
+			default:
+				tmp, err = GetImageLayerDescriptors(ctx, cs, child)
+			}
+
+			if err != nil {
+				return []ocispec.Descriptor{}, err
+			}
+
+			lis = append(lis, tmp...)
+		}
+	case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+	default:
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "GetImageLayerInfo: unhandled media type %s", desc.MediaType)
+	}
+	return lis, nil
+}
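A sketch of calling the new GetImageLayerDescriptors helper through a client; the helper signature is the one added above, while the client and reference plumbing around it are assumptions:

package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// layerDescriptors resolves an image reference that has already been pulled and
// returns its layer descriptors, grouped per platform as described above.
func layerDescriptors(ctx context.Context, client *containerd.Client, ref string) ([]ocispec.Descriptor, error) {
	img, err := client.GetImage(ctx, ref)
	if err != nil {
		return nil, err
	}
	return images.GetImageLayerDescriptors(ctx, client.ContentStore(), img.Target())
}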
2  vendor/github.com/containerd/containerd/images/mediatypes.go (generated, vendored)
@ -22,8 +22,10 @@ package images
 // here for clarity.
 const (
 	MediaTypeDockerSchema2Layer            = "application/vnd.docker.image.rootfs.diff.tar"
+	MediaTypeDockerSchema2LayerEnc         = "application/vnd.docker.image.rootfs.diff.tar+enc"
 	MediaTypeDockerSchema2LayerForeign     = "application/vnd.docker.image.rootfs.foreign.diff.tar"
 	MediaTypeDockerSchema2LayerGzip        = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	MediaTypeDockerSchema2LayerGzipEnc     = "application/vnd.docker.image.rootfs.diff.tar.gzip+enc"
 	MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
 	MediaTypeDockerSchema2Config           = "application/vnd.docker.container.image.v1+json"
 	MediaTypeDockerSchema2Manifest         = "application/vnd.docker.distribution.manifest.v2+json"
17  vendor/github.com/containerd/containerd/import.go (generated, vendored)
@ -35,6 +35,7 @@ type importOpts struct {
 	imageRefT    func(string) string
 	dgstRefT     func(digest.Digest) string
 	allPlatforms bool
+	compress     bool
 }

 // ImportOpt allows the caller to specify import specific options
@ -74,6 +75,15 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
 	}
 }

+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(c *importOpts) error {
+		c.compress = true
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferrenced blobs may be imported to the content store as well.
@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 	}
 	defer done(ctx)

-	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader)
+	var aio []archive.ImportOpt
+	if iopts.compress {
+		aio = append(aio, archive.WithImportCompression())
+	}
+
+	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...)
 	if err != nil {
 		return nil, err
 	}
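The client-level option mirrors the archive-level one shown earlier. A minimal sketch, assuming a tar stream opened by the caller:

package example

import (
	"context"
	"io"

	"github.com/containerd/containerd"
)

// importCompressed imports a tar stream and asks the client to gzip any
// uncompressed layers it finds; Import forwards this as
// archive.WithImportCompression to archive.ImportIndex.
func importCompressed(ctx context.Context, client *containerd.Client, r io.Reader) error {
	_, err := client.Import(ctx, r, containerd.WithImportCompression())
	return err
}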
2  vendor/github.com/containerd/containerd/metadata/content.go (generated, vendored)
@ -637,11 +637,11 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64,
 			return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size)
 		}
 		size = status.Offset
-		actual = nw.w.Digest()

 		if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) {
 			return "", err
 		}
+		actual = nw.w.Digest()
 	}

 	bkt, err := createBlobBucket(tx, nw.namespace, actual)
39  vendor/github.com/containerd/containerd/metadata/gc.go (generated, vendored)
@ -46,11 +46,17 @@ const (
 	ResourceIngest
 )

+const (
+	resourceContentFlat  = ResourceContent | 0x20
+	resourceSnapshotFlat = ResourceSnapshot | 0x20
+)
+
 var (
 	labelGCRoot       = []byte("containerd.io/gc.root")
 	labelGCSnapRef    = []byte("containerd.io/gc.ref.snapshot.")
 	labelGCContentRef = []byte("containerd.io/gc.ref.content")
 	labelGCExpire     = []byte("containerd.io/gc.expire")
+	labelGCFlat       = []byte("containerd.io/gc.flat")
 )

 func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
@ -90,6 +96,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 				return nil
 			}
 			libkt := lbkt.Bucket(k)
+			var flat bool

 			if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil {
 				if expV := lblbkt.Get(labelGCExpire); expV != nil {
@ -102,6 +109,10 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 						return nil
 					}
 				}
+
+				if flatV := lblbkt.Get(labelGCFlat); flatV != nil {
+					flat = true
+				}
 			}

 			fn(gcnode(ResourceLease, ns, string(k)))
@ -111,16 +122,26 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 			// no need to allow the lookup to be recursive, handling here
 			// therefore reduces the number of database seeks.

+			ctype := ResourceContent
+			if flat {
+				ctype = resourceContentFlat
+			}
+
 			cbkt := libkt.Bucket(bucketKeyObjectContent)
 			if cbkt != nil {
 				if err := cbkt.ForEach(func(k, v []byte) error {
-					fn(gcnode(ResourceContent, ns, string(k)))
+					fn(gcnode(ctype, ns, string(k)))
 					return nil
 				}); err != nil {
 					return err
 				}
 			}

+			stype := ResourceSnapshot
+			if flat {
+				stype = resourceSnapshotFlat
+			}
+
 			sbkt := libkt.Bucket(bucketKeyObjectSnapshots)
 			if sbkt != nil {
 				if err := sbkt.ForEach(func(sk, sv []byte) error {
@ -130,7 +151,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 					snbkt := sbkt.Bucket(sk)

 					return snbkt.ForEach(func(k, v []byte) error {
-						fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)))
+						fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k)))
 						return nil
 					})
 				}); err != nil {
@ -257,7 +278,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
 }

 func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error {
-	if node.Type == ResourceContent {
+	switch node.Type {
+	case ResourceContent:
 		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key))
 		if bkt == nil {
 			// Node may be created from dead edge
@ -265,7 +287,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)
 		}

 		return sendLabelRefs(node.Namespace, bkt, fn)
-	} else if node.Type == ResourceSnapshot {
+	case ResourceSnapshot, resourceSnapshotFlat:
 		parts := strings.SplitN(node.Key, "/", 2)
 		if len(parts) != 2 {
 			return errors.Errorf("invalid snapshot gc key %s", node.Key)
@ -280,11 +302,16 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)
 		}

 		if pv := bkt.Get(bucketKeyParent); len(pv) > 0 {
-			fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv)))
+			fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv)))
+		}
+
+		// Do not send labeled references for flat snapshot refs
+		if node.Type == resourceSnapshotFlat {
+			return nil
 		}

 		return sendLabelRefs(node.Namespace, bkt, fn)
-	} else if node.Type == ResourceIngest {
+	case ResourceIngest:
 		// Send expected value
 		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key))
 		if bkt == nil {
vendor/github.com/containerd/containerd/metadata/namespaces.go
generated
vendored
10
vendor/github.com/containerd/containerd/metadata/namespaces.go
generated
vendored
@ -129,7 +129,15 @@ func (s *namespaceStore) List(ctx context.Context) ([]string, error) {
|
|||||||
return namespaces, nil
|
return namespaces, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *namespaceStore) Delete(ctx context.Context, namespace string) error {
|
func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error {
|
||||||
|
i := &namespaces.DeleteInfo{
|
||||||
|
Name: namespace,
|
||||||
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(ctx, i); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
bkt := getBucket(s.tx, bucketKeyVersion)
|
bkt := getBucket(s.tx, bucketKeyVersion)
|
||||||
if empty, err := s.namespaceEmpty(ctx, namespace); err != nil {
|
if empty, err := s.namespaceEmpty(ctx, namespace); err != nil {
|
||||||
return err
|
return err
|
||||||
|
39
vendor/github.com/containerd/containerd/metadata/snapshot.go
generated
vendored
39
vendor/github.com/containerd/containerd/metadata/snapshot.go
generated
vendored
@ -34,6 +34,10 @@ import (
|
|||||||
bolt "go.etcd.io/bbolt"
|
bolt "go.etcd.io/bbolt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
inheritedLabelsPrefix = "containerd.io/snapshot/"
|
||||||
|
)
|
||||||
|
|
||||||
type snapshotter struct {
|
type snapshotter struct {
|
||||||
snapshots.Snapshotter
|
snapshots.Snapshotter
|
||||||
name string
|
name string
|
||||||
@ -209,6 +213,15 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath
|
|||||||
bkey = string(sbkt.Get(bucketKeyName))
|
bkey = string(sbkt.Get(bucketKeyName))
|
||||||
local.Parent = string(sbkt.Get(bucketKeyParent))
|
local.Parent = string(sbkt.Get(bucketKeyParent))
|
||||||
|
|
||||||
|
inner := snapshots.Info{
|
||||||
|
Name: bkey,
|
||||||
|
Labels: filterInheritedLabels(local.Labels),
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return snapshots.Info{}, err
|
return snapshots.Info{}, err
|
||||||
@ -338,12 +351,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels))
|
||||||
|
|
||||||
// TODO: Consider doing this outside of transaction to lessen
|
// TODO: Consider doing this outside of transaction to lessen
|
||||||
// metadata lock time
|
// metadata lock time
|
||||||
if readonly {
|
if readonly {
|
||||||
m, err = s.Snapshotter.View(ctx, bkey, bparent)
|
m, err = s.Snapshotter.View(ctx, bkey, bparent, inheritedOpt)
|
||||||
} else {
|
} else {
|
||||||
m, err = s.Snapshotter.Prepare(ctx, bkey, bparent)
|
m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, inheritedOpt)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
@ -445,9 +460,11 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inheritedOpt := snapshots.WithLabels(filterInheritedLabels(base.Labels))
|
||||||
|
|
||||||
// TODO: Consider doing this outside of transaction to lessen
|
// TODO: Consider doing this outside of transaction to lessen
|
||||||
// metadata lock time
|
// metadata lock time
|
||||||
return s.Snapshotter.Commit(ctx, nameKey, bkey)
|
return s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt)
|
||||||
})
|
})
|
||||||
|
|
||||||
}
|
}
|
||||||
@ -761,3 +778,19 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error {
|
|||||||
func (s *snapshotter) Close() error {
|
func (s *snapshotter) Close() error {
|
||||||
return s.Snapshotter.Close()
|
return s.Snapshotter.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// filterInheritedLabels filters the provided labels by removing any key which doesn't have
|
||||||
|
// a prefix of "containerd.io/snapshot/".
|
||||||
|
func filterInheritedLabels(labels map[string]string) map[string]string {
|
||||||
|
if labels == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
filtered := make(map[string]string)
|
||||||
|
for k, v := range labels {
|
||||||
|
if strings.HasPrefix(k, inheritedLabelsPrefix) {
|
||||||
|
filtered[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filtered
|
||||||
|
}
|
||||||
|
16
vendor/github.com/containerd/containerd/namespaces.go
generated
vendored
16
vendor/github.com/containerd/containerd/namespaces.go
generated
vendored
@ -100,10 +100,18 @@ func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) {
|
|||||||
return namespaces, nil
|
return namespaces, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error {
|
func (r *remoteNamespaces) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error {
|
||||||
var req api.DeleteNamespaceRequest
|
i := namespaces.DeleteInfo{
|
||||||
|
Name: namespace,
|
||||||
req.Name = namespace
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(ctx, &i); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req := api.DeleteNamespaceRequest{
|
||||||
|
Name: namespace,
|
||||||
|
}
|
||||||
_, err := r.client.Delete(ctx, &req)
|
_, err := r.client.Delete(ctx, &req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errdefs.FromGRPC(err)
|
return errdefs.FromGRPC(err)
|
||||||
|
11
vendor/github.com/containerd/containerd/namespaces/store.go
generated
vendored
11
vendor/github.com/containerd/containerd/namespaces/store.go
generated
vendored
@ -33,5 +33,14 @@ type Store interface {
|
|||||||
List(ctx context.Context) ([]string, error)
|
List(ctx context.Context) ([]string, error)
|
||||||
|
|
||||||
// Delete removes the namespace. The namespace must be empty to be deleted.
|
// Delete removes the namespace. The namespace must be empty to be deleted.
|
||||||
Delete(ctx context.Context, namespace string) error
|
Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteInfo specifies information for the deletion of a namespace
|
||||||
|
type DeleteInfo struct {
|
||||||
|
// Name of the namespace
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteOpts allows the caller to set options for namespace deletion
|
||||||
|
type DeleteOpts func(context.Context, *DeleteInfo) error
|
||||||
|
2  vendor/github.com/containerd/containerd/namespaces/ttrpc.go (generated, vendored)
@ -30,7 +30,7 @@ const (
 func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
 	md, ok := ttrpc.GetMetadata(ctx)
 	if !ok {
-		md = ttrpc.Metadata{}
+		md = ttrpc.MD{}
 	}
 	md.Set(TTRPCHeader, namespace)
 	return ttrpc.WithMetadata(ctx, md)
@ -1,5 +1,3 @@
-// +build !windows
-
 /*
    Copyright The containerd Authors.

@ -16,27 +14,23 @@
 limitations under the License.
 */

-package proc
+package containerd

 import (
-	"github.com/pkg/errors"
+	"context"
+
+	"github.com/containerd/cgroups"
+	"github.com/containerd/containerd/namespaces"
 )

-// RuncRoot is the path to the root runc state directory
-const RuncRoot = "/run/containerd/runc"
-
-func stateName(v interface{}) string {
-	switch v.(type) {
-	case *runningState, *execRunningState:
-		return "running"
-	case *createdState, *execCreatedState, *createdCheckpointState:
-		return "created"
-	case *pausedState:
-		return "paused"
-	case *deletedState:
-		return "deleted"
-	case *stoppedState:
-		return "stopped"
+// WithNamespaceCgroupDeletion removes the cgroup directory that was created for the namespace
+func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) error {
+	cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(i.Name))
+	if err != nil {
+		if err == cgroups.ErrCgroupDeleted {
+			return nil
+		}
+		return err
 	}
-	panic(errors.Errorf("invalid state %v", v))
+	return cg.Delete()
 }
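WithNamespaceCgroupDeletion above is a namespaces.DeleteOpts value. A sketch of using it when deleting a namespace through the client:

package example

import (
	"context"

	"github.com/containerd/containerd"
)

// deleteNamespace removes an empty namespace and, via the option, the v1 cgroup
// directory that was created for it. The option is a function value matching
// namespaces.DeleteOpts, so it is passed without parentheses.
func deleteNamespace(ctx context.Context, client *containerd.Client, name string) error {
	return client.NamespaceService().Delete(ctx, name, containerd.WithNamespaceCgroupDeletion)
}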
50  vendor/github.com/containerd/containerd/oci/spec_opts.go (generated, vendored)
@ -76,6 +76,20 @@ func setLinux(s *Spec) {
 	}
 }

+// nolint
+func setResources(s *Spec) {
+	if s.Linux != nil {
+		if s.Linux.Resources == nil {
+			s.Linux.Resources = &specs.LinuxResources{}
+		}
+	}
+	if s.Windows != nil {
+		if s.Windows.Resources == nil {
+			s.Windows.Resources = &specs.WindowsResources{}
+		}
+	}
+}
+
 // setCapabilities sets Linux Capabilities to empty if unset
 func setCapabilities(s *Spec) {
 	setProcess(s)
@ -1139,3 +1153,39 @@ func WithAnnotations(annotations map[string]string) SpecOpts {
 		return nil
 	}
 }
+
+// WithLinuxDevices adds the provided linux devices to the spec
+func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		s.Linux.Devices = append(s.Linux.Devices, devices...)
+		return nil
+	}
+}
+
+var ErrNotADevice = errors.New("not a device node")
+
+// WithLinuxDevice adds the device specified by path to the spec
+func WithLinuxDevice(path, permissions string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		setResources(s)
+
+		dev, err := deviceFromPath(path, permissions)
+		if err != nil {
+			return err
+		}
+
+		s.Linux.Devices = append(s.Linux.Devices, *dev)
+
+		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{
+			Type:   dev.Type,
+			Allow:  true,
+			Major:  &dev.Major,
+			Minor:  &dev.Minor,
+			Access: permissions,
+		})
+
+		return nil
+	}
+}
vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
generated
vendored
Normal file
63
vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package oci
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
|
||||||
|
var stat unix.Stat_t
|
||||||
|
if err := unix.Lstat(path, &stat); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
devNumber = stat.Rdev
|
||||||
|
major = unix.Major(devNumber)
|
||||||
|
minor = unix.Minor(devNumber)
|
||||||
|
)
|
||||||
|
if major == 0 {
|
||||||
|
return nil, ErrNotADevice
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
devType string
|
||||||
|
mode = stat.Mode
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case mode&unix.S_IFBLK == unix.S_IFBLK:
|
||||||
|
devType = "b"
|
||||||
|
case mode&unix.S_IFCHR == unix.S_IFCHR:
|
||||||
|
devType = "c"
|
||||||
|
}
|
||||||
|
fm := os.FileMode(mode)
|
||||||
|
return &specs.LinuxDevice{
|
||||||
|
Type: devType,
|
||||||
|
Path: path,
|
||||||
|
Major: int64(major),
|
||||||
|
Minor: int64(minor),
|
||||||
|
FileMode: &fm,
|
||||||
|
UID: &stat.Uid,
|
||||||
|
GID: &stat.Gid,
|
||||||
|
}, nil
|
||||||
|
}
|
63  vendor/github.com/containerd/containerd/oci/spec_opts_unix.go (generated, vendored, new file)
@ -0,0 +1,63 @@
+// +build !linux,!windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package oci
+
+import (
+	"os"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/unix"
+)
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	var stat unix.Stat_t
+	if err := unix.Lstat(path, &stat); err != nil {
+		return nil, err
+	}
+
+	var (
+		devNumber = uint64(stat.Rdev)
+		major     = unix.Major(devNumber)
+		minor     = unix.Minor(devNumber)
+	)
+	if major == 0 {
+		return nil, ErrNotADevice
+	}
+
+	var (
+		devType string
+		mode    = stat.Mode
+	)
+	switch {
+	case mode&unix.S_IFBLK == unix.S_IFBLK:
+		devType = "b"
+	case mode&unix.S_IFCHR == unix.S_IFCHR:
+		devType = "c"
+	}
+	fm := os.FileMode(mode)
+	return &specs.LinuxDevice{
+		Type:     devType,
+		Path:     path,
+		Major:    int64(major),
+		Minor:    int64(minor),
+		FileMode: &fm,
+		UID:      &stat.Uid,
+		GID:      &stat.Gid,
+	}, nil
+}
5  vendor/github.com/containerd/containerd/oci/spec_opts_windows.go (generated, vendored)
@ -23,6 +23,7 @@ import (

 	"github.com/containerd/containerd/containers"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )

 // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the
@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
 		return nil
 	}
 }
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	return nil, errors.New("device from path not supported on Windows")
+}
@ -16,14 +16,13 @@
 limitations under the License.
 */

-package proc
+package process

 import (
 	"context"

 	"github.com/containerd/console"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/runtime/proc"
 	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 )
@ -67,6 +66,6 @@ func (s *deletedState) SetExited(status int) {
 	// no op
 }

-func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
 	return nil, errors.Errorf("cannot exec in a deleted state")
 }
@ -16,7 +16,7 @@
 limitations under the License.
 */

-package proc
+package process

 import (
 	"context"
@ -31,7 +31,8 @@ import (
 	"golang.org/x/sys/unix"

 	"github.com/containerd/console"
-	"github.com/containerd/containerd/runtime/proc"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/pkg/stdio"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@ -49,10 +50,10 @@ type execProcess struct {
 	io      *processIO
 	status  int
 	exited  time.Time
-	pid     *safePid
+	pid     safePid
 	closers []io.Closer
 	stdin   io.Closer
-	stdio   proc.Stdio
+	stdio   stdio.Stdio
 	path    string
 	spec    specs.Process

@ -95,6 +96,7 @@ func (e *execProcess) setExited(status int) {
 	e.status = status
 	e.exited = time.Now()
 	e.parent.Platform.ShutdownConsole(context.Background(), e.console)
+	e.pid.set(StoppedPID)
 	close(e.waitBlock)
 }

@ -106,7 +108,7 @@ func (e *execProcess) Delete(ctx context.Context) error {
 }

 func (e *execProcess) delete(ctx context.Context) error {
-	e.wg.Wait()
+	waitTimeout(ctx, &e.wg, 2*time.Second)
 	if e.io != nil {
 		for _, c := range e.closers {
 			c.Close()
@ -142,7 +144,12 @@ func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error {

 func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error {
 	pid := e.pid.get()
-	if pid != 0 {
+	switch {
+	case pid == 0:
+		return errors.Wrap(errdefs.ErrFailedPrecondition, "process not created")
+	case pid < 0:
+		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
+	default:
 		if err := unix.Kill(pid, syscall.Signal(sig)); err != nil {
 			return errors.Wrapf(checkKillError(err), "exec kill error")
 		}
@ -154,7 +161,7 @@ func (e *execProcess) Stdin() io.Closer {
 	return e.stdin
 }

-func (e *execProcess) Stdio() proc.Stdio {
+func (e *execProcess) Stdio() stdio.Stdio {
 	return e.stdio
 }

@ -254,10 +261,13 @@ func (e *execProcess) Status(ctx context.Context) (string, error) {
 	}
 	e.mu.Lock()
 	defer e.mu.Unlock()
-	// if we don't have a pid then the exec process has just been created
+	// if we don't have a pid(pid=0) then the exec process has just been created
 	if e.pid.get() == 0 {
 		return "created", nil
 	}
+	if e.pid.get() == StoppedPID {
+		return "stopped", nil
+	}
 	// if we have a pid and it can be signaled, the process is running
 	if err := unix.Kill(e.pid.get(), 0); err == nil {
 		return "running", nil
@ -16,7 +16,7 @@
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package proc
|
package process
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
@ -16,7 +16,7 @@
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package proc
|
package process
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@ -33,7 +33,7 @@ import (
|
|||||||
"github.com/containerd/console"
|
"github.com/containerd/console"
|
||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
"github.com/containerd/containerd/mount"
|
"github.com/containerd/containerd/mount"
|
||||||
"github.com/containerd/containerd/runtime/proc"
|
"github.com/containerd/containerd/pkg/stdio"
|
||||||
"github.com/containerd/fifo"
|
"github.com/containerd/fifo"
|
||||||
runc "github.com/containerd/go-runc"
|
runc "github.com/containerd/go-runc"
|
||||||
google_protobuf "github.com/gogo/protobuf/types"
|
google_protobuf "github.com/gogo/protobuf/types"
|
||||||
@ -59,15 +59,15 @@ type Init struct {
|
|||||||
id string
|
id string
|
||||||
Bundle string
|
Bundle string
|
||||||
console console.Console
|
console console.Console
|
||||||
Platform proc.Platform
|
Platform stdio.Platform
|
||||||
io *processIO
|
io *processIO
|
||||||
runtime *runc.Runc
|
runtime *runc.Runc
|
||||||
status int
|
status int
|
||||||
exited time.Time
|
exited time.Time
|
||||||
pid int
|
pid safePid
|
||||||
closers []io.Closer
|
closers []io.Closer
|
||||||
stdin io.Closer
|
stdin io.Closer
|
||||||
stdio proc.Stdio
|
stdio stdio.Stdio
|
||||||
Rootfs string
|
Rootfs string
|
||||||
IoUID int
|
IoUID int
|
||||||
IoGID int
|
IoGID int
|
||||||
@ -93,7 +93,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New returns a new process
|
// New returns a new process
|
||||||
func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init {
|
+func New(id string, runtime *runc.Runc, stdio stdio.Stdio) *Init {
p := &Init{
id: id,
runtime: runtime,
@@ -113,6 +113,9 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
pio *processIO
pidFile = newPidFile(p.Bundle)
)
+p.pid.Lock()
+defer p.pid.Unlock()
+
if r.Terminal {
if socket, err = runc.NewTempConsoleSocket(); err != nil {
return errors.Wrap(err, "failed to create OCI runtime console socket")
@@ -167,7 +170,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
if err != nil {
return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
}
-p.pid = pid
+p.pid.pid = pid
return nil
}

@@ -213,7 +216,7 @@ func (p *Init) ID() string {

// Pid of the process
func (p *Init) Pid() int {
-return p.pid
+return p.pid.get()
}

// ExitStatus of the process
@@ -272,6 +275,7 @@ func (p *Init) setExited(status int) {
p.exited = time.Now()
p.status = status
p.Platform.ShutdownConsole(context.Background(), p.console)
+p.pid.set(StoppedPID)
close(p.waitBlock)
}

@@ -284,7 +288,7 @@ func (p *Init) Delete(ctx context.Context) error {
}

func (p *Init) delete(ctx context.Context) error {
-p.wg.Wait()
+waitTimeout(ctx, &p.wg, 2*time.Second)
err := p.runtime.Delete(ctx, p.id, nil)
// ignore errors if a runtime has already deleted the process
// but we still hold metadata and pipes
@@ -377,7 +381,7 @@ func (p *Init) Runtime() *runc.Runc {
}

// Exec returns a new child process
-func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
p.mu.Lock()
defer p.mu.Unlock()

@@ -385,7 +389,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce
}

// exec returns a new exec'd process
-func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
// process exec request
var spec specs.Process
if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {
@@ -398,14 +402,13 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce
path: path,
parent: p,
spec: spec,
-stdio: proc.Stdio{
+stdio: stdio.Stdio{
Stdin: r.Stdin,
Stdout: r.Stdout,
Stderr: r.Stderr,
Terminal: r.Terminal,
},
waitBlock: make(chan struct{}),
-pid: &safePid{},
}
e.execState = &execCreatedState{p: e}
return e, nil
@@ -465,7 +468,7 @@ func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error {
}

// Stdio of the process
-func (p *Init) Stdio() proc.Stdio {
+func (p *Init) Stdio() stdio.Stdio {
return p.stdio
}

@@ -485,7 +488,7 @@ func (p *Init) runtimeError(rErr error, msg string) error {
}
}

-func withConditionalIO(c proc.Stdio) runc.IOOpt {
+func withConditionalIO(c stdio.Stdio) runc.IOOpt {
return func(o *runc.IOOption) {
o.OpenStdin = c.Stdin != ""
o.OpenStdout = c.Stdout != ""
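The hunks above stop storing a bare int pid on Init and route every read and write through a mutex-guarded wrapper, so Pid() can be called while Create is still waiting for runc to report the container pid. A minimal sketch of that pattern (illustrative only; the safePid name and the StoppedPID sentinel follow the diff, everything else is assumed):

package main

import (
	"fmt"
	"sync"
)

// safePid mirrors the wrapper used in the diff: a mutex plus the pid value.
type safePid struct {
	sync.Mutex
	pid int
}

func (s *safePid) get() int {
	s.Lock()
	defer s.Unlock()
	return s.pid
}

func (s *safePid) set(pid int) {
	s.Lock()
	s.pid = pid
	s.Unlock()
}

func main() {
	p := &safePid{}
	p.set(12345) // what Create does once runc reports the container pid
	fmt.Println(p.get())
	p.set(-1) // what setExited does with the StoppedPID sentinel
	fmt.Println(p.get())
}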
@@ -16,12 +16,11 @@
limitations under the License.
*/

-package proc
+package process

import (
"context"

-"github.com/containerd/containerd/runtime/proc"
runc "github.com/containerd/go-runc"
google_protobuf "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
@@ -35,7 +34,7 @@ type initState interface {
Resume(context.Context) error
Update(context.Context, *google_protobuf.Any) error
Checkpoint(context.Context, *CheckpointConfig) error
-Exec(context.Context, string, *ExecConfig) (proc.Process, error)
+Exec(context.Context, string, *ExecConfig) (Process, error)
Kill(context.Context, uint32, bool) error
SetExited(int)
}
@@ -100,7 +99,7 @@ func (s *createdState) SetExited(status int) {
}
}

-func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
return s.p.exec(ctx, path, r)
}

@@ -143,6 +142,9 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
p := s.p
sio := p.stdio

+p.pid.Lock()
+defer p.pid.Unlock()
+
var (
err error
socket *runc.Socket
@@ -182,7 +184,7 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
}
-p.pid = pid
+p.pid.pid = pid
return s.transition("running")
}

@@ -205,7 +207,7 @@ func (s *createdCheckpointState) SetExited(status int) {
}
}

-func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
return nil, errors.Errorf("cannot exec in a created state")
}

@@ -265,7 +267,7 @@ func (s *runningState) SetExited(status int) {
}
}

-func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
return s.p.exec(ctx, path, r)
}

@@ -329,7 +331,7 @@ func (s *pausedState) SetExited(status int) {
}
}

-func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
return nil, errors.Errorf("cannot exec in a paused state")
}

@@ -382,6 +384,6 @@ func (s *stoppedState) SetExited(status int) {
// no op
}

-func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) {
return nil, errors.Errorf("cannot exec in a stopped state")
}
@@ -16,7 +16,7 @@
limitations under the License.
*/

-package proc
+package process

import (
"context"
@@ -32,7 +32,7 @@ import (

"github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
-"github.com/containerd/containerd/runtime/proc"
+"github.com/containerd/containerd/pkg/stdio"
"github.com/containerd/fifo"
runc "github.com/containerd/go-runc"
"github.com/pkg/errors"
@@ -50,7 +50,7 @@ type processIO struct {

uri *url.URL
copy bool
-stdio proc.Stdio
+stdio stdio.Stdio
}

func (p *processIO) Close() error {
@@ -76,7 +76,7 @@ func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error {
return nil
}

-func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) {
+func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio stdio.Stdio) (*processIO, error) {
pio := &processIO{
stdio: stdio,
}
@@ -101,17 +101,20 @@ func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio
pio.copy = true
pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
case "binary":
-pio.io, err = newBinaryIO(ctx, id, u)
+pio.io, err = NewBinaryIO(ctx, id, u)
case "file":
-if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil {
+filePath := u.Path
+if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
return nil, err
}
var f *os.File
-f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+f, err = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
f.Close()
+pio.stdio.Stdout = filePath
+pio.stdio.Stderr = filePath
pio.copy = true
pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
default:
@@ -179,10 +182,10 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w
)
if ok {
if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil {
-return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+return errors.Wrapf(err, "containerd-shim: opening w/o fifo %q failed", i.name)
}
if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil {
-return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+return errors.Wrapf(err, "containerd-shim: opening r/o fifo %q failed", i.name)
}
} else {
if sameFile != nil {
@@ -191,7 +194,7 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w
continue
}
if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil {
-return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
+return errors.Wrapf(err, "containerd-shim: opening file %q failed", i.name)
}
if stdout == stderr {
sameFile = &countingWriteCloser{
@@ -251,7 +254,8 @@ func isFifo(path string) (bool, error) {
return false, nil
}

-func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
+// NewBinaryIO runs a custom binary process for pluggable shim logging
+func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
@@ -264,7 +268,7 @@ func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error)
}
}
ctx, cancel := context.WithCancel(ctx)
-cmd := exec.CommandContext(ctx, uri.Host, args...)
+cmd := exec.CommandContext(ctx, uri.Path, args...)
cmd.Env = append(cmd.Env,
"CONTAINER_ID="+id,
"CONTAINER_NAMESPACE="+ns,
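The io.go hunks switch the file and binary IO schemes from u.Host to u.Path. The reason is visible with net/url: for a URI such as file:///var/log/c.log the host component is empty and the absolute path lives in Path. A small sketch of that behaviour (the example URIs are made up):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, s := range []string{
		"file:///var/log/container.log",    // hypothetical log file target
		"binary:///usr/local/bin/shim-log", // hypothetical logging binary
	} {
		u, err := url.Parse(s)
		if err != nil {
			panic(err)
		}
		// Host is empty for these URIs; Path carries the usable filesystem path.
		fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)
	}
}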
@@ -14,30 +14,17 @@
limitations under the License.
*/

-package proc
+package process

import (
"context"
"io"
-"sync"
"time"

"github.com/containerd/console"
+"github.com/containerd/containerd/pkg/stdio"
)

-// Stdio of a process
-type Stdio struct {
-Stdin string
-Stdout string
-Stderr string
-Terminal bool
-}
-
-// IsNull returns true if the stdio is not defined
-func (s Stdio) IsNull() bool {
-return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
-}
-
// Process on a system
type Process interface {
// ID returns the id for the process
@@ -51,7 +38,7 @@ type Process interface {
// Stdin returns the process STDIN
Stdin() io.Closer
// Stdio returns io information for the container
-Stdio() Stdio
+Stdio() stdio.Stdio
// Status returns the process status
Status(context.Context) (string, error)
// Wait blocks until the process has exited
@@ -67,12 +54,3 @@ type Process interface {
// SetExited sets the exit status for the process
SetExited(status int)
}
-
-// Platform handles platform-specific behavior that may differs across
-// platform implementations
-type Platform interface {
-CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
-wg *sync.WaitGroup) (console.Console, error)
-ShutdownConsole(ctx context.Context, console console.Console) error
-Close() error
-}
@@ -14,7 +14,7 @@
limitations under the License.
*/

-package proc
+package process

import (
google_protobuf "github.com/gogo/protobuf/types"
@@ -16,9 +16,10 @@
limitations under the License.
*/

-package proc
+package process

import (
+"context"
"encoding/json"
"fmt"
"io"
@@ -34,6 +35,15 @@ import (
"golang.org/x/sys/unix"
)

+const (
+// RuncRoot is the path to the root runc state directory
+RuncRoot = "/run/containerd/runc"
+// StoppedPID is the pid assigned after a container has run and stopped
+StoppedPID = -1
+// InitPidFile name of the file that contains the init pid
+InitPidFile = "init.pid"
+)
+
// safePid is a thread safe wrapper for pid.
type safePid struct {
sync.Mutex
@@ -46,6 +56,12 @@ func (s *safePid) get() int {
return s.pid
}

+func (s *safePid) set(pid int) {
+s.Lock()
+s.pid = pid
+s.Unlock()
+}
+
// TODO(mlaventure): move to runc package?
func getLastRuntimeError(r *runc.Runc) (string, error) {
if r.Log == "" {
@@ -117,9 +133,6 @@ func checkKillError(err error) error {
return errors.Wrapf(err, "unknown error after kill")
}

-// InitPidFile name of the file that contains the init pid
-const InitPidFile = "init.pid"
-
func newPidFile(bundle string) *pidFile {
return &pidFile{
path: filepath.Join(bundle, InitPidFile),
@@ -143,3 +156,37 @@ func (p *pidFile) Path() string {
func (p *pidFile) Read() (int, error) {
return runc.ReadPidFile(p.path)
}
+
+// waitTimeout handles waiting on a waitgroup with a specified timeout.
+// this is commonly used for waiting on IO to finish after a process has exited
+func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error {
+ctx, cancel := context.WithTimeout(ctx, timeout)
+defer cancel()
+done := make(chan struct{}, 1)
+go func() {
+wg.Wait()
+close(done)
+}()
+select {
+case <-done:
+return nil
+case <-ctx.Done():
+return ctx.Err()
+}
+}
+
+func stateName(v interface{}) string {
+switch v.(type) {
+case *runningState, *execRunningState:
+return "running"
+case *createdState, *execCreatedState, *createdCheckpointState:
+return "created"
+case *pausedState:
+return "paused"
+case *deletedState:
+return "deleted"
+case *stoppedState:
+return "stopped"
+}
+panic(errors.Errorf("invalid state %v", v))
+}
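waitTimeout, added above, is what bounds how long delete() blocks on the IO copy goroutines. A self-contained sketch of the same select-on-WaitGroup idea (this is a rewrite for illustration, not the vendored code):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitTimeout returns nil if wg finishes before the timeout, ctx.Err() otherwise.
func waitTimeout(ctx context.Context, wg *sync.WaitGroup, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(3 * time.Second) // simulate IO that drains slowly
	}()
	// Mirrors delete() above: give IO two seconds, then move on.
	fmt.Println(waitTimeout(context.Background(), &wg, 2*time.Second))
}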
33 vendor/github.com/containerd/containerd/pkg/stdio/platform.go generated vendored Normal file
@@ -0,0 +1,33 @@
+/*
+Copyright The containerd Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package stdio
+
+import (
+"context"
+"sync"
+
+"github.com/containerd/console"
+)
+
+// Platform handles platform-specific behavior that may differs across
+// platform implementations
+type Platform interface {
+CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
+wg *sync.WaitGroup) (console.Console, error)
+ShutdownConsole(ctx context.Context, console console.Console) error
+Close() error
+}
30 vendor/github.com/containerd/containerd/pkg/stdio/stdio.go generated vendored Normal file
@@ -0,0 +1,30 @@
+/*
+Copyright The containerd Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package stdio
+
+// Stdio of a process
+type Stdio struct {
+Stdin string
+Stdout string
+Stderr string
+Terminal bool
+}
+
+// IsNull returns true if the stdio is not defined
+func (s Stdio) IsNull() bool {
+return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
+}
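The new stdio.Stdio type is what the shim code above now passes around instead of the package-local struct deleted from process.go. A short usage sketch (the struct is copied from the diff; the field values are placeholders):

package main

import "fmt"

// Stdio mirrors the struct introduced in pkg/stdio above.
type Stdio struct {
	Stdin, Stdout, Stderr string
	Terminal              bool
}

func (s Stdio) IsNull() bool {
	return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
}

func main() {
	none := Stdio{}
	logs := Stdio{Stdout: "/run/fifo/out", Stderr: "/run/fifo/err"} // hypothetical FIFO paths
	fmt.Println(none.IsNull(), logs.IsNull())                       // true false: only the second needs pipes wired up
}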
51 vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go generated vendored
@@ -19,6 +19,7 @@
package ttrpcutil

import (
+"context"
"net"
"os"
"time"
@@ -28,33 +29,31 @@ import (
)

func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) {
-var c net.Conn
-var lastError error
-timedOutError := errors.Errorf("timed out waiting for npipe %s", address)
-start := time.Now()
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+defer cancel()
+
+// If there is nobody serving the pipe we limit the timeout for this case to
+// 5 seconds because any shim that would serve this endpoint should serve it
+// within 5 seconds.
+serveTimer := time.NewTimer(5 * time.Second)
+defer serveTimer.Stop()
for {
-remaining := timeout - time.Since(start)
-if remaining <= 0 {
-lastError = timedOutError
-break
+c, err := winio.DialPipeContext(ctx, address)
+if err != nil {
+if os.IsNotExist(err) {
+select {
+case <-serveTimer.C:
+return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout")
+default:
+// Wait 10ms for the shim to serve and try again.
+time.Sleep(10 * time.Millisecond)
+continue
+}
+} else if err == context.DeadlineExceeded {
+return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address)
+}
+return nil, err
}
-c, lastError = winio.DialPipe(address, &remaining)
-if lastError == nil {
-break
-}
-if !os.IsNotExist(lastError) {
-break
-}
-// There is nobody serving the pipe. We limit the timeout for this case
-// to 5 seconds because any shim that would serve this endpoint should
-// serve it within 5 seconds. We use the passed in timeout for the
-// `DialPipe` timeout if the pipe exists however to give the pipe time
-// to `Accept` the connection.
-if time.Since(start) >= 5*time.Second {
-lastError = timedOutError
-break
-}
-time.Sleep(10 * time.Millisecond)
+return c, nil
}
-return c, lastError
}
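The Windows ttrpcDial rewrite above splits the wait into two budgets: a short fixed timer for "the pipe does not exist yet" and the caller's overall deadline for the dial itself. A generic sketch of that two-budget retry shape (the dial function here is a stand-in, not the npipe API; only the structure is the point):

package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"time"
)

var errNotServing = os.ErrNotExist // stands in for "nobody is serving the endpoint yet"

// dialRetry keeps retrying while the endpoint does not exist, but only for
// notExistBudget; every other failure is returned immediately.
func dialRetry(ctx context.Context, dial func(context.Context) (string, error), notExistBudget time.Duration) (string, error) {
	timer := time.NewTimer(notExistBudget)
	defer timer.Stop()
	for {
		c, err := dial(ctx)
		if err == nil {
			return c, nil
		}
		if errors.Is(err, errNotServing) {
			select {
			case <-timer.C:
				return "", fmt.Errorf("endpoint not found before timeout: %w", err)
			default:
				time.Sleep(10 * time.Millisecond) // same pacing as the diff
				continue
			}
		}
		return "", err
	}
}

func main() {
	attempts := 0
	dial := func(context.Context) (string, error) {
		if attempts++; attempts < 5 {
			return "", errNotServing
		}
		return "connected", nil // pretend the shim started serving
	}
	fmt.Println(dialRetry(context.Background(), dial, 5*time.Second))
}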
299 vendor/github.com/containerd/containerd/remotes/docker/authorizer.go generated vendored
@@ -31,7 +31,6 @@ import (

"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
-"github.com/containerd/containerd/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context/ctxhttp"
@@ -41,128 +40,278 @@ type dockerAuthorizer struct {
credentials func(string) (string, string, error)

client *http.Client
-ua string
+header http.Header
mu sync.Mutex

-auth map[string]string
+// indexed by host name
+handlers map[string]*authHandler
}

// NewAuthorizer creates a Docker authorizer using the provided function to
// get credentials for the token server or basic auth.
+// Deprecated: Use NewDockerAuthorizer
func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer {
-if client == nil {
+return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f))
-client = http.DefaultClient
+}
-}
+
-return &dockerAuthorizer{
+type authorizerConfig struct {
-credentials: f,
+credentials func(string) (string, string, error)
-client: client,
+client *http.Client
-ua: "containerd/" + version.Version,
+header http.Header
-auth: map[string]string{},
+}
+
+// AuthorizerOpt configures an authorizer
+type AuthorizerOpt func(*authorizerConfig)
+
+// WithAuthClient provides the HTTP client for the authorizer
+func WithAuthClient(client *http.Client) AuthorizerOpt {
+return func(opt *authorizerConfig) {
+opt.client = client
}
}

-func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
+// WithAuthCreds provides a credential function to the authorizer
-// TODO: Lookup matching challenge and scope rather than just host
+func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
-if auth := a.getAuth(req.URL.Host); auth != "" {
+return func(opt *authorizerConfig) {
-req.Header.Set("Authorization", auth)
+opt.credentials = creds
+}
+}
+
+// WithAuthHeader provides HTTP headers for authorization
+func WithAuthHeader(hdr http.Header) AuthorizerOpt {
+return func(opt *authorizerConfig) {
+opt.header = hdr
+}
+}
+
+// NewDockerAuthorizer creates an authorizer using Docker's registry
+// authentication spec.
+// See https://docs.docker.com/registry/spec/auth/
+func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
+var ao authorizerConfig
+for _, opt := range opts {
+opt(&ao)
}

+if ao.client == nil {
+ao.client = http.DefaultClient
+}
+
+return &dockerAuthorizer{
+credentials: ao.credentials,
+client: ao.client,
+header: ao.header,
+handlers: make(map[string]*authHandler),
+}
+}
+
+// Authorize handles auth request.
+func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
+// skip if there is no auth handler
+ah := a.getAuthHandler(req.URL.Host)
+if ah == nil {
+return nil
+}
+
+auth, err := ah.authorize(ctx)
+if err != nil {
+return err
+}
+
+req.Header.Set("Authorization", auth)
return nil
}

+func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler {
+a.mu.Lock()
+defer a.mu.Unlock()
+
+return a.handlers[host]
+}
+
func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
last := responses[len(responses)-1]
host := last.Request.URL.Host

+a.mu.Lock()
+defer a.mu.Unlock()
for _, c := range parseAuthHeader(last.Header) {
if c.scheme == bearerAuth {
if err := invalidAuthorization(c, responses); err != nil {
-// TODO: Clear token
+delete(a.handlers, host)
-a.setAuth(host, "")
return err
}

-// TODO(dmcg): Store challenge, not token
+// reuse existing handler
-// Move token fetching to authorize
+//
-return a.setTokenAuth(ctx, host, c.parameters)
+// assume that one registry will return the common
+// challenge information, including realm and service.
+// and the resource scope is only different part
+// which can be provided by each request.
+if _, ok := a.handlers[host]; ok {
+return nil
+}
+
+common, err := a.generateTokenOptions(ctx, host, c)
+if err != nil {
+return err
+}
+
+a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
+return nil
} else if c.scheme == basicAuth && a.credentials != nil {
-// TODO: Resolve credentials on authorize
username, secret, err := a.credentials(host)
if err != nil {
return err
}

if username != "" && secret != "" {
-auth := username + ":" + secret
+common := tokenOptions{
-a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth))))
+username: username,
+secret: secret,
+}
+
+a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
return nil
}
}
}

return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
}

-func (a *dockerAuthorizer) getAuth(host string) string {
+func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) {
-a.mu.Lock()
+realm, ok := c.parameters["realm"]
-defer a.mu.Unlock()
-
-return a.auth[host]
-}
-
-func (a *dockerAuthorizer) setAuth(host string, auth string) bool {
-a.mu.Lock()
-defer a.mu.Unlock()
-
-changed := a.auth[host] != auth
-a.auth[host] = auth
-
-return changed
-}
-
-func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error {
-realm, ok := params["realm"]
if !ok {
-return errors.New("no realm specified for token auth challenge")
+return tokenOptions{}, errors.New("no realm specified for token auth challenge")
}

realmURL, err := url.Parse(realm)
if err != nil {
-return errors.Wrap(err, "invalid token auth challenge realm")
+return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm")
}

to := tokenOptions{
realm: realmURL.String(),
-service: params["service"],
+service: c.parameters["service"],
}

-to.scopes = getTokenScopes(ctx, params)
+scope, ok := c.parameters["scope"]
-if len(to.scopes) == 0 {
+if !ok {
-return errors.Errorf("no scope specified for token auth challenge")
+return tokenOptions{}, errors.Errorf("no scope specified for token auth challenge")
}
+to.scopes = append(to.scopes, scope)

if a.credentials != nil {
to.username, to.secret, err = a.credentials(host)
if err != nil {
-return err
+return tokenOptions{}, err
}
}
+return to, nil
+}

-var token string
+// authResult is used to control limit rate.
+type authResult struct {
+sync.WaitGroup
+token string
+err error
+}
+
+// authHandler is used to handle auth request per registry server.
+type authHandler struct {
+sync.Mutex
+
+header http.Header
+
+client *http.Client
+
+// only support basic and bearer schemes
+scheme authenticationScheme
+
+// common contains common challenge answer
+common tokenOptions
+
+// scopedTokens caches token indexed by scopes, which used in
+// bearer auth case
+scopedTokens map[string]*authResult
+}
+
+func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler {
+return &authHandler{
+header: hdr,
+client: client,
+scheme: scheme,
+common: opts,
+scopedTokens: map[string]*authResult{},
+}
+}
+
+func (ah *authHandler) authorize(ctx context.Context) (string, error) {
+switch ah.scheme {
+case basicAuth:
+return ah.doBasicAuth(ctx)
+case bearerAuth:
+return ah.doBearerAuth(ctx)
+default:
+return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
+}
+}
+
+func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) {
+username, secret := ah.common.username, ah.common.secret
+
+if username == "" || secret == "" {
+return "", fmt.Errorf("failed to handle basic auth because missing username or secret")
+}
+
+auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
+return fmt.Sprintf("Basic %s", auth), nil
+}
+
+func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) {
+// copy common tokenOptions
+to := ah.common
+
+to.scopes = getTokenScopes(ctx, to.scopes)
+if len(to.scopes) == 0 {
+return "", errors.Errorf("no scope specified for token auth challenge")
+}
+
+// Docs: https://docs.docker.com/registry/spec/auth/scope
+scoped := strings.Join(to.scopes, " ")
+
+ah.Lock()
+if r, exist := ah.scopedTokens[scoped]; exist {
+ah.Unlock()
+r.Wait()
+return r.token, r.err
+}
+
+// only one fetch token job
+r := new(authResult)
+r.Add(1)
+ah.scopedTokens[scoped] = r
+ah.Unlock()
+
+// fetch token for the resource scope
+var (
+token string
+err error
+)
if to.secret != "" {
-// Credential information is provided, use oauth POST endpoint
+// credential information is provided, use oauth POST endpoint
-token, err = a.fetchTokenWithOAuth(ctx, to)
+token, err = ah.fetchTokenWithOAuth(ctx, to)
-if err != nil {
+err = errors.Wrap(err, "failed to fetch oauth token")
-return errors.Wrap(err, "failed to fetch oauth token")
-}
} else {
-// Do request anonymously
+// do request anonymously
-token, err = a.fetchToken(ctx, to)
+token, err = ah.fetchToken(ctx, to)
-if err != nil {
+err = errors.Wrap(err, "failed to fetch anonymous token")
-return errors.Wrap(err, "failed to fetch anonymous token")
-}
}
-a.setAuth(host, fmt.Sprintf("Bearer %s", token))
+token = fmt.Sprintf("Bearer %s", token)

-return nil
+r.token, r.err = token, err
+r.Done()
+return r.token, r.err
}

type tokenOptions struct {
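The block above replaces the single NewAuthorizer constructor with functional options plus a per-host authHandler. Assuming a caller imports this vendored package as docker, wiring it up would look roughly like this sketch (the registry name and credentials are placeholders, and where the authorizer is plugged in downstream is left abstract):

package main

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Resolve credentials per host; empty strings mean anonymous access.
	creds := func(host string) (string, string, error) {
		if host == "registry.example.com" { // placeholder registry
			return "user", "secret", nil // placeholder credentials
		}
		return "", "", nil
	}

	auth := docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthCreds(creds),
		docker.WithAuthHeader(http.Header{"User-Agent": []string{"example/0.1"}}),
	)
	_ = auth // handed to the docker resolver configuration elsewhere
}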
@@ -181,7 +330,7 @@ type postTokenResponse struct {
Scope string `json:"scope"`
}

-func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
+func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
form := url.Values{}
form.Set("scope", strings.Join(to.scopes, " "))
form.Set("service", to.service)
@@ -202,11 +351,13 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti
return "", err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
-if a.ua != "" {
+if ah.header != nil {
-req.Header.Set("User-Agent", a.ua)
+for k, v := range ah.header {
+req.Header[k] = append(req.Header[k], v...)
+}
}

-resp, err := ctxhttp.Do(ctx, a.client, req)
+resp, err := ctxhttp.Do(ctx, ah.client, req)
if err != nil {
return "", err
}
@@ -216,7 +367,7 @@ func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOpti
// As of September 2017, GCR is known to return 404.
// As of February 2018, JFrog Artifactory is known to return 401.
if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 {
-return a.fetchToken(ctx, to)
+return ah.fetchToken(ctx, to)
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
log.G(ctx).WithFields(logrus.Fields{
@@ -245,15 +396,17 @@ type getTokenResponse struct {
RefreshToken string `json:"refresh_token"`
}

-// getToken fetches a token using a GET request
+// fetchToken fetches a token using a GET request
-func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) {
+func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) {
req, err := http.NewRequest("GET", to.realm, nil)
if err != nil {
return "", err
}

-if a.ua != "" {
+if ah.header != nil {
-req.Header.Set("User-Agent", a.ua)
+for k, v := range ah.header {
+req.Header[k] = append(req.Header[k], v...)
+}
}

reqParams := req.URL.Query()
@@ -272,7 +425,7 @@ func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (str

req.URL.RawQuery = reqParams.Encode()

-resp, err := ctxhttp.Do(ctx, a.client, req)
+resp, err := ctxhttp.Do(ctx, ah.client, req)
if err != nil {
return "", err
}
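The authHandler above de-duplicates token fetches per scope: the first caller for a scope creates an authResult, and concurrent callers Wait() on it and reuse the result. A stripped-down sketch of that single-flight idea, independent of the registry specifics (types and the fetch function here are invented for the example):

package main

import (
	"fmt"
	"sync"
)

type result struct {
	sync.WaitGroup
	token string
	err   error
}

type tokenCache struct {
	mu      sync.Mutex
	byScope map[string]*result
}

// get runs fetch at most once per scope; concurrent callers wait for the first result.
func (c *tokenCache) get(scope string, fetch func() (string, error)) (string, error) {
	c.mu.Lock()
	if r, ok := c.byScope[scope]; ok {
		c.mu.Unlock()
		r.Wait()
		return r.token, r.err
	}
	r := new(result)
	r.Add(1)
	c.byScope[scope] = r
	c.mu.Unlock()

	r.token, r.err = fetch()
	r.Done()
	return r.token, r.err
}

func main() {
	c := &tokenCache{byScope: map[string]*result{}}
	fetch := func() (string, error) { return "Bearer abc", nil } // placeholder token fetch
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			t, _ := c.get("repository:library/busybox:pull", fetch)
			fmt.Println(t)
		}()
	}
	wg.Wait()
}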
131 vendor/github.com/containerd/containerd/remotes/docker/fetcher.go generated vendored
@@ -23,7 +23,7 @@ import (
"io"
"io/ioutil"
"net/http"
-"path"
+"net/url"
"strings"

"github.com/containerd/containerd/errdefs"
@@ -32,7 +32,6 @@ import (
"github.com/docker/distribution/registry/api/errcode"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
-"github.com/sirupsen/logrus"
)

type dockerFetcher struct {
@@ -40,26 +39,46 @@ type dockerFetcher struct {
}

func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
-ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
+ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest))
-logrus.Fields{
-"base": r.base.String(),
-"digest": desc.Digest,
-},
-))

-urls, err := r.getV2URLPaths(ctx, desc)
+hosts := r.filterHosts(HostCapabilityPull)
-if err != nil {
+if len(hosts) == 0 {
-return nil, err
+return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
}

-ctx, err = contextWithRepositoryScope(ctx, r.refspec, false)
+ctx, err := contextWithRepositoryScope(ctx, r.refspec, false)
if err != nil {
return nil, err
}

return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
-for _, u := range urls {
+// firstly try fetch via external urls
-rc, err := r.open(ctx, u, desc.MediaType, offset)
+for _, us := range desc.URLs {
+ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us))
+
+u, err := url.Parse(us)
+if err != nil {
+log.G(ctx).WithError(err).Debug("failed to parse")
+continue
+}
+log.G(ctx).Debug("trying alternative url")
+
+// Try this first, parse it
+host := RegistryHost{
+Client: http.DefaultClient,
+Host: u.Host,
+Scheme: u.Scheme,
+Path: u.Path,
+Capabilities: HostCapabilityPull,
+}
+req := r.request(host, http.MethodGet)
+// Strip namespace from base
+req.path = u.Path
+if u.RawQuery != "" {
+req.path = req.path + "?" + u.RawQuery
+}
+
+rc, err := r.open(ctx, req, desc.MediaType, offset)
if err != nil {
if errdefs.IsNotFound(err) {
continue // try one of the other urls.
@@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
return rc, nil
}
+
+// Try manifests endpoints for manifests types
+switch desc.MediaType {
+case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
+images.MediaTypeDockerSchema1Manifest,
+ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
+
+for _, host := range r.hosts {
+req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
+
+rc, err := r.open(ctx, req, desc.MediaType, offset)
+if err != nil {
+if errdefs.IsNotFound(err) {
+continue // try another host
+}
+
+return nil, err
+}
+
+return rc, nil
+}
+}
+
+// Finally use blobs endpoints
+for _, host := range r.hosts {
+req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
+
+rc, err := r.open(ctx, req, desc.MediaType, offset)
+if err != nil {
+if errdefs.IsNotFound(err) {
+continue // try another host
+}
+
+return nil, err
+}
+
+return rc, nil
+}
+
return nil, errors.Wrapf(errdefs.ErrNotFound,
"could not fetch content descriptor %v (%v) from remote",
desc.Digest, desc.MediaType)
@@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
})
}

-func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) {
+func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) {
-req, err := http.NewRequest(http.MethodGet, u, nil)
+req.header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
-if err != nil {
-return nil, err
-}
-
-req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))

if offset > 0 {
// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
// will return the header without supporting the range. The content
// range must always be checked.
-req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
}

-resp, err := r.doRequestWithRetries(ctx, req, nil)
+resp, err := req.doWithRetries(ctx, nil)
if err != nil {
return nil, err
}
@@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
defer resp.Body.Close()

if resp.StatusCode == http.StatusNotFound {
-return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
+return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String())
}
var registryErr errcode.Errors
if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
-return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
}
-return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error())
+return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
}
if offset > 0 {
cr := resp.Header.Get("content-range")
@@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int

return resp.Body, nil
}
-
-// getV2URLPaths generates the candidate urls paths for the object based on the
-// set of hints and the provided object id. URLs are returned in the order of
-// most to least likely succeed.
-func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) {
-var urls []string
-
-if len(desc.URLs) > 0 {
-// handle fetch via external urls.
-for _, u := range desc.URLs {
-log.G(ctx).WithField("url", u).Debug("adding alternative url")
-urls = append(urls, u)
-}
-}
-
-switch desc.MediaType {
-case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
-images.MediaTypeDockerSchema1Manifest,
-ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
-urls = append(urls, r.url(path.Join("manifests", desc.Digest.String())))
-}
-
-// always fallback to attempting to get the object out of the blobs store.
-urls = append(urls, r.url(path.Join("blobs", desc.Digest.String())))
-
-return urls, nil
-}
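The new Fetch flow above tries descriptor URLs first, then the manifests endpoint for manifest media types, then blobs, moving on whenever a host answers "not found". A compact sketch of that ordered-fallback loop (the sources and the notFound error are invented for the example):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// fetchFirst walks the candidate sources in order and returns the first hit,
// treating "not found" as a cue to try the next candidate.
func fetchFirst(sources []func() (string, error)) (string, error) {
	for _, open := range sources {
		data, err := open()
		if err != nil {
			if errors.Is(err, errNotFound) {
				continue // try the next endpoint, like the diff does per host
			}
			return "", err // real errors abort immediately
		}
		return data, nil
	}
	return "", fmt.Errorf("could not fetch content from any source: %w", errNotFound)
}

func main() {
	sources := []func() (string, error){
		func() (string, error) { return "", errNotFound },    // external URL misses
		func() (string, error) { return "", errNotFound },    // manifests endpoint misses
		func() (string, error) { return "layer-bytes", nil }, // blobs endpoint hits
	}
	fmt.Println(fetchFirst(sources))
}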
42 vendor/github.com/containerd/containerd/remotes/docker/handler.go generated vendored
@@ -110,3 +110,45 @@ func appendDistributionSourceLabel(originLabel, repo string) string {
func distributionSourceLabelKey(source string) string {
return fmt.Sprintf("%s.%s", labelDistributionSource, source)
}
+
+// selectRepositoryMountCandidate will select the repo which has longest
+// common prefix components as the candidate.
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
+u, err := url.Parse("dummy://" + refspec.Locator)
+if err != nil {
+// NOTE: basically, it won't be error here
+return ""
+}
+
+source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+repoLabel, ok := sources[distributionSourceLabelKey(source)]
+if !ok || repoLabel == "" {
+return ""
+}
+
+n, match := 0, ""
+components := strings.Split(target, "/")
+for _, repo := range strings.Split(repoLabel, ",") {
+// the target repo is not a candidate
+if repo == target {
+continue
+}
+
+if l := commonPrefixComponents(components, repo); l >= n {
+n, match = l, repo
+}
+}
+return match
+}
+
+func commonPrefixComponents(components []string, target string) int {
+targetComponents := strings.Split(target, "/")
+
+i := 0
+for ; i < len(components) && i < len(targetComponents); i++ {
+if components[i] != targetComponents[i] {
+break
+}
+}
+return i
+}
192 vendor/github.com/containerd/containerd/remotes/docker/pusher.go generated vendored
@@ -21,7 +21,7 @@ import (
"io"
"io/ioutil"
"net/http"
-"path"
+"net/url"
"strings"
"time"

@@ -37,7 +37,7 @@ import (

type dockerPusher struct {
*dockerBase
-tag string
+object string

// TODO: namespace tracker
tracker StatusTracker
@@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
return nil, errors.Wrap(err, "failed to get status")
}

+hosts := p.filterHosts(HostCapabilityPush)
+if len(hosts) == 0 {
+return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts")
+}
+
var (
isManifest bool
-existCheck string
+existCheck []string
+host = hosts[0]
)

switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
isManifest = true
-if p.tag == "" {
+existCheck = getManifestPath(p.object, desc.Digest)
-existCheck = path.Join("manifests", desc.Digest.String())
-} else {
-existCheck = path.Join("manifests", p.tag)
-}
default:
-existCheck = path.Join("blobs", desc.Digest.String())
+existCheck = []string{"blobs", desc.Digest.String()}
}

-req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil)
+req := p.request(host, http.MethodHead, existCheck...)
-if err != nil {
+req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
-return nil, err
-}

-req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
+log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")

-resp, err := p.doRequestWithRetries(ctx, req, nil)
+resp, err := req.doWithRetries(ctx, nil)
if err != nil {
if errors.Cause(err) != ErrInvalidAuthorization {
return nil, err
@@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
} else {
if resp.StatusCode == http.StatusOK {
var exists bool
-if isManifest && p.tag != "" {
+if isManifest && existCheck[1] != desc.Digest.String() {
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader == desc.Digest {
exists = true
@@ -116,67 +117,95 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
}
}

-// TODO: Lookup related objects for cross repository push

if isManifest {
-var putPath string
+putPath := getManifestPath(p.object, desc.Digest)
-if p.tag != "" {
+req = p.request(host, http.MethodPut, putPath...)
-putPath = path.Join("manifests", p.tag)
+req.header.Add("Content-Type", desc.MediaType)
-} else {
-putPath = path.Join("manifests", desc.Digest.String())
-}
-
-req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil)
-if err != nil {
-return nil, err
-}
-req.Header.Add("Content-Type", desc.MediaType)
} else {
-// TODO: Do monolithic upload if size is small

// Start upload request
-req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil)
+req = p.request(host, http.MethodPost, "blobs", "uploads/")
-if err != nil {
-return nil, err
+
+var resp *http.Response
+if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
+preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
+pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo)
+
+// NOTE: the fromRepo might be private repo and
+// auth service still can grant token without error.
+// but the post request will fail because of 401.
+//
+// for the private repo, we should remove mount-from
+// query and send the request again.
+resp, err = preq.do(pctx)
+//resp, err = p.doRequest(pctx, req)
+if err != nil {
+return nil, err
+}
+
+if resp.StatusCode == http.StatusUnauthorized {
+log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
+
+resp.Body.Close()
+resp = nil
+}
}

-resp, err := p.doRequestWithRetries(ctx, req, nil)
+if resp == nil {
-if err != nil {
+resp, err = req.doWithRetries(ctx, nil)
-return nil, err
+if err != nil {
+return nil, err
+}
}

switch resp.StatusCode {
case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
+case http.StatusCreated:
+p.tracker.SetStatus(ref, Status{
+Status: content.Status{
+Ref: ref,
+},
+})
+return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
default:
// TODO: log error
return nil, errors.Errorf("unexpected response: %s", resp.Status)
}

-location := resp.Header.Get("Location")
+var (
+location = resp.Header.Get("Location")
+lurl *url.URL
+lhost = host
+)
// Support paths without host in location
if strings.HasPrefix(location, "/") {
-// Support location string containing path and query
+lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
-qmIndex := strings.Index(location, "?")
+if err != nil {
-if qmIndex > 0 {
+return nil, errors.Wrapf(err, "unable to parse location %v", location)
-u := p.base
+}
-u.Path = location[:qmIndex]
+} else {
-u.RawQuery = location[qmIndex+1:]
+if !strings.Contains(location, "://") {
-location = u.String()
+location = lhost.Scheme + "://" + location
-} else {
+}
-u := p.base
+lurl, err = url.Parse(location)
-u.Path = location
+if err != nil {
-location = u.String()
+return nil, errors.Wrapf(err, "unable to parse location %v", location)
+}
+
+if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
+
+lhost.Scheme = lurl.Scheme
+lhost.Host = lurl.Host
+log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination")
+
+// Strip authorizer if change to host or scheme
+lhost.Authorizer = nil
}
}
+q := lurl.Query()
-req, err = http.NewRequest(http.MethodPut, location, nil)
-if err != nil {
-return nil, err
-}
-q := req.URL.Query()
|
|
||||||
q.Add("digest", desc.Digest.String())
|
q.Add("digest", desc.Digest.String())
|
||||||
req.URL.RawQuery = q.Encode()
|
|
||||||
|
|
||||||
|
req = p.request(lhost, http.MethodPut)
|
||||||
|
req.path = lurl.Path + "?" + q.Encode()
|
||||||
}
|
}
|
||||||
p.tracker.SetStatus(ref, Status{
|
p.tracker.SetStatus(ref, Status{
|
||||||
Status: content.Status{
|
Status: content.Status{
|
||||||
@ -191,13 +220,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
|
|||||||
|
|
||||||
pr, pw := io.Pipe()
|
pr, pw := io.Pipe()
|
||||||
respC := make(chan *http.Response, 1)
|
respC := make(chan *http.Response, 1)
|
||||||
|
body := ioutil.NopCloser(pr)
|
||||||
|
|
||||||
req.Body = ioutil.NopCloser(pr)
|
req.body = func() (io.ReadCloser, error) {
|
||||||
req.ContentLength = desc.Size
|
if body == nil {
|
||||||
|
return nil, errors.New("cannot reuse body, request must be retried")
|
||||||
|
}
|
||||||
|
// Only use the body once since pipe cannot be seeked
|
||||||
|
ob := body
|
||||||
|
body = nil
|
||||||
|
return ob, nil
|
||||||
|
}
|
||||||
|
req.size = desc.Size
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer close(respC)
|
defer close(respC)
|
||||||
resp, err = p.doRequest(ctx, req)
|
resp, err = req.do(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
pr.CloseWithError(err)
|
pr.CloseWithError(err)
|
||||||
return
|
return
|
||||||
@ -223,6 +261,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getManifestPath(object string, dgst digest.Digest) []string {
|
||||||
|
if i := strings.IndexByte(object, '@'); i >= 0 {
|
||||||
|
if object[i+1:] != dgst.String() {
|
||||||
|
// use digest, not tag
|
||||||
|
object = ""
|
||||||
|
} else {
|
||||||
|
// strip @<digest> for registry path to make tag
|
||||||
|
object = object[:i]
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if object == "" {
|
||||||
|
return []string{"manifests", dgst.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{"manifests", object}
|
||||||
|
}
|
||||||
|
|
||||||
type pushWriter struct {
|
type pushWriter struct {
|
||||||
base *dockerBase
|
base *dockerBase
|
||||||
ref string
|
ref string
|
||||||
@ -296,7 +353,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
|
|||||||
}
|
}
|
||||||
|
|
||||||
if size > 0 && size != status.Offset {
|
if size > 0 && size != status.Offset {
|
||||||
return errors.Errorf("unxpected size %d, expected %d", status.Offset, size)
|
return errors.Errorf("unexpected size %d, expected %d", status.Offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
if expected == "" {
|
if expected == "" {
|
||||||
@ -320,3 +377,16 @@ func (pw *pushWriter) Truncate(size int64) error {
|
|||||||
// TODO: always error on manifest
|
// TODO: always error on manifest
|
||||||
return errors.New("cannot truncate remote upload")
|
return errors.New("cannot truncate remote upload")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func requestWithMountFrom(req *request, mount, from string) *request {
|
||||||
|
creq := *req
|
||||||
|
|
||||||
|
sep := "?"
|
||||||
|
if strings.Contains(creq.path, sep) {
|
||||||
|
sep = "&"
|
||||||
|
}
|
||||||
|
|
||||||
|
creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
|
||||||
|
|
||||||
|
return &creq
|
||||||
|
}
|
||||||
|
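
The new upload path above first asks the registry to cross-repository mount a blob (the mount/from query added by requestWithMountFrom) and only falls back to a regular upload when the mount request is refused, for example with a 401 on a private source repository. A minimal, hypothetical Go sketch of how that query string is formed; mountFromPath and the example repository names are illustrative only, not part of the vendored code.

package main

import (
	"fmt"
	"net/url"
)

// mountFromPath mirrors requestWithMountFrom: append mount and from parameters
// to an upload path, choosing "?" or "&" depending on whether the path already
// carries a query string.
func mountFromPath(uploadPath, dgst, fromRepo string) string {
	sep := "?"
	if u, err := url.Parse(uploadPath); err == nil && u.RawQuery != "" {
		sep = "&"
	}
	return uploadPath + sep + "mount=" + dgst + "&from=" + fromRepo
}

func main() {
	// Prints: /v2/library/nginx/blobs/uploads/?mount=sha256:deadbeef&from=library/alpine
	fmt.Println(mountFromPath("/v2/library/nginx/blobs/uploads/", "sha256:deadbeef", "library/alpine"))
}

If the registry answers 201 Created, the blob already exists on the remote and the pusher reports ErrAlreadyExists instead of streaming the data again.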
202 vendor/github.com/containerd/containerd/remotes/docker/registry.go (generated, vendored, new file)
@@ -0,0 +1,202 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"net/http"
+)
+
+// HostCapabilities represent the capabilities of the registry
+// host. This also represents the set of operations for which
+// the registry host may be trusted to perform.
+//
+// For example pushing is a capability which should only be
+// performed on an upstream source, not a mirror.
+// Resolving (the process of converting a name into a digest)
+// must be considered a trusted operation and only done by
+// a host which is trusted (or more preferably by secure process
+// which can prove the provenance of the mapping). A public
+// mirror should never be trusted to do a resolve action.
+//
+// | Registry Type    | Pull | Resolve | Push |
+// |------------------|------|---------|------|
+// | Public Registry  | yes  | yes     | yes  |
+// | Private Registry | yes  | yes     | yes  |
+// | Public Mirror    | yes  | no      | no   |
+// | Private Mirror   | yes  | yes     | no   |
+type HostCapabilities uint8
+
+const (
+	// HostCapabilityPull represents the capability to fetch manifests
+	// and blobs by digest
+	HostCapabilityPull HostCapabilities = 1 << iota
+
+	// HostCapabilityResolve represents the capability to fetch manifests
+	// by name
+	HostCapabilityResolve
+
+	// HostCapabilityPush represents the capability to push blobs and
+	// manifests
+	HostCapabilityPush
+
+	// Reserved for future capabilities (i.e. search, catalog, remove)
+)
+
+func (c HostCapabilities) Has(t HostCapabilities) bool {
+	return c&t == t
+}
+
+// RegistryHost represents a complete configuration for a registry
+// host, representing the capabilities, authorizations, connection
+// configuration, and location.
+type RegistryHost struct {
+	Client       *http.Client
+	Authorizer   Authorizer
+	Host         string
+	Scheme       string
+	Path         string
+	Capabilities HostCapabilities
+}
+
+// RegistryHosts fetches the registry hosts for a given namespace,
+// provided by the host component of an distribution image reference.
+type RegistryHosts func(string) ([]RegistryHost, error)
+
+// Registries joins multiple registry configuration functions, using the same
+// order as provided within the arguments. When an empty registry configuration
+// is returned with a nil error, the next function will be called.
+// NOTE: This function will not join configurations, as soon as a non-empty
+// configuration is returned from a configuration function, it will be returned
+// to the caller.
+func Registries(registries ...RegistryHosts) RegistryHosts {
+	return func(host string) ([]RegistryHost, error) {
+		for _, registry := range registries {
+			config, err := registry(host)
+			if err != nil {
+				return config, err
+			}
+			if len(config) > 0 {
+				return config, nil
+			}
+		}
+		return nil, nil
+	}
+}
+
+type registryOpts struct {
+	authorizer Authorizer
+	plainHTTP  func(string) (bool, error)
+	host       func(string) (string, error)
+	client     *http.Client
+}
+
+// RegistryOpt defines a registry default option
+type RegistryOpt func(*registryOpts)
+
+// WithPlainHTTP configures registries to use plaintext http scheme
+// for the provided host match function.
+func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.plainHTTP = f
+	}
+}
+
+// WithAuthorizer configures the default authorizer for a registry
+func WithAuthorizer(a Authorizer) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.authorizer = a
+	}
+}
+
+// WithHostTranslator defines the default translator to use for registry hosts
+func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.host = h
+	}
+}
+
+// WithClient configures the default http client for a registry
+func WithClient(c *http.Client) RegistryOpt {
+	return func(opts *registryOpts) {
+		opts.client = c
+	}
+}
+
+// ConfigureDefaultRegistries is used to create a default configuration for
+// registries. For more advanced configurations or per-domain setups,
+// the RegistryHosts interface should be used directly.
+// NOTE: This function will always return a non-empty value or error
+func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
+	var opts registryOpts
+	for _, opt := range ropts {
+		opt(&opts)
+	}
+
+	return func(host string) ([]RegistryHost, error) {
+		config := RegistryHost{
+			Client:       opts.client,
+			Authorizer:   opts.authorizer,
+			Host:         host,
+			Scheme:       "https",
+			Path:         "/v2",
+			Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
+		}
+
+		if config.Client == nil {
+			config.Client = http.DefaultClient
+		}
+
+		if opts.plainHTTP != nil {
+			match, err := opts.plainHTTP(host)
+			if err != nil {
+				return nil, err
+			}
+			if match {
+				config.Scheme = "http"
+			}
+		}
+
+		if opts.host != nil {
+			var err error
+			config.Host, err = opts.host(config.Host)
+			if err != nil {
+				return nil, err
+			}
+		} else if host == "docker.io" {
+			config.Host = "registry-1.docker.io"
+		}
+
+		return []RegistryHost{config}, nil
+	}
+}
+
+// MatchAllHosts is a host match function which is always true.
+func MatchAllHosts(string) (bool, error) {
+	return true, nil
+}
+
+// MatchLocalhost is a host match function which returns true for
+// localhost.
+func MatchLocalhost(host string) (bool, error) {
+	for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} {
+		if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') {
+			return true, nil
+		}
+	}
+	return host == "::1", nil
+
+}
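
The new registry.go file above is the heart of this update: resolvers and pushers now consume a RegistryHosts function instead of a single hard-coded endpoint. A hedged usage sketch, assuming the vendored package is imported under its usual path; everything referenced here (ConfigureDefaultRegistries, WithClient, WithPlainHTTP, MatchLocalhost, HostCapabilityPush) comes from the file just shown.

package main

import (
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Defaults: https scheme, /v2 path, pull/resolve/push capabilities,
	// plain HTTP only for localhost-style registries.
	hosts := docker.ConfigureDefaultRegistries(
		docker.WithClient(http.DefaultClient),
		docker.WithPlainHTTP(docker.MatchLocalhost),
	)

	config, err := hosts("docker.io")
	if err != nil {
		panic(err)
	}
	for _, h := range config {
		// docker.io is rewritten to registry-1.docker.io by the default helper.
		fmt.Println(h.Scheme, h.Host, h.Path, h.Capabilities.Has(docker.HostCapabilityPush))
	}
}

The capability bits matter because a mirror can be trusted to pull but not to resolve or push, which the resolver enforces by filtering hosts per operation.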
530 vendor/github.com/containerd/containerd/remotes/docker/resolver.go (generated, vendored)
@@ -18,9 +18,10 @@ package docker
 
 import (
 	"context"
+	"fmt"
 	"io"
+	"io/ioutil"
 	"net/http"
-	"net/url"
 	"path"
 	"strings"
 
@@ -46,6 +47,19 @@ var (
 	// ErrInvalidAuthorization is used when credentials are passed to a server but
 	// those credentials are rejected.
 	ErrInvalidAuthorization = errors.New("authorization failed")
+
+	// MaxManifestSize represents the largest size accepted from a registry
+	// during resolution. Larger manifests may be accepted using a
+	// resolution method other than the registry.
+	//
+	// NOTE: The max supported layers by some runtimes is 128 and individual
+	// layers will not contribute more than 256 bytes, making a
+	// reasonable limit for a large image manifests of 32K bytes.
+	// 4M bytes represents a much larger upper bound for images which may
+	// contain large annotations or be non-images. A proper manifest
+	// design puts large metadata in subobjects, as is consistent the
+	// intent of the manifest design.
+	MaxManifestSize int64 = 4 * 1048 * 1048
 )
 
 // Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
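
MaxManifestSize (4 * 1048 * 1048, roughly 4.4 MB) bounds what the resolver will accept while resolving a name; typical manifests stay around 32 KB. A minimal sketch of the guard this constant enables; checkManifestSize is a hypothetical helper, the real check lives inside the resolver's Resolve path and rejects oversized manifests with errdefs.ErrNotFound.

package main

import (
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

// checkManifestSize mirrors the resolver's size guard: refuse anything larger
// than MaxManifestSize instead of buffering it during resolution.
func checkManifestSize(size int64) error {
	if size > docker.MaxManifestSize {
		return fmt.Errorf("rejecting %d byte manifest (limit %d)", size, docker.MaxManifestSize)
	}
	return nil
}

func main() {
	fmt.Println(checkManifestSize(32 * 1024)) // nil: a typical manifest
	fmt.Println(checkManifestSize(8000000))   // error: above the ~4.4 MB limit
}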
@ -72,31 +86,38 @@ type Authorizer interface {
|
 
 // ResolverOptions are used to configured a new Docker register resolver
 type ResolverOptions struct {
-	// Authorizer is used to authorize registry requests
-	Authorizer Authorizer
+	// Hosts returns registry host configurations for a namespace.
+	Hosts RegistryHosts
 
-	// Credentials provides username and secret given a host.
-	// If username is empty but a secret is given, that secret
-	// is interpreted as a long lived token.
-	// Deprecated: use Authorizer
-	Credentials func(string) (string, string, error)
-
-	// Host provides the hostname given a namespace.
-	Host func(string) (string, error)
-
 	// Headers are the HTTP request header fields sent by the resolver
 	Headers http.Header
 
-	// PlainHTTP specifies to use plain http and not https
-	PlainHTTP bool
-
-	// Client is the http client to used when making registry requests
-	Client *http.Client
-
 	// Tracker is used to track uploads to the registry. This is used
 	// since the registry does not have upload tracking and the existing
 	// mechanism for getting blob upload status is expensive.
 	Tracker StatusTracker
+
+	// Authorizer is used to authorize registry requests
+	// Deprecated: use Hosts
+	Authorizer Authorizer
+
+	// Credentials provides username and secret given a host.
+	// If username is empty but a secret is given, that secret
+	// is interpreted as a long lived token.
+	// Deprecated: use Hosts
+	Credentials func(string) (string, string, error)
+
+	// Host provides the hostname given a namespace.
+	// Deprecated: use Hosts
+	Host func(string) (string, error)
+
+	// PlainHTTP specifies to use plain http and not https
+	// Deprecated: use Hosts
+	PlainHTTP bool
+
+	// Client is the http client to used when making registry requests
+	// Deprecated: use Hosts
+	Client *http.Client
 }
 
 // DefaultHost is the default host function.
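
With the hunk above, ResolverOptions keeps the old per-field knobs only for compatibility; callers are expected to supply a RegistryHosts via Hosts. A hedged migration sketch, assuming the usual import paths; newResolver and its credential callback are illustrative, not code from this commit.

package main

import (
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
)

// newResolver shows the post-update shape: registry endpoints come from a
// RegistryHosts function instead of the deprecated Host/PlainHTTP/Client fields.
func newResolver(username, secret string) remotes.Resolver {
	authorizer := docker.NewDockerAuthorizer(
		docker.WithAuthCreds(func(host string) (string, string, error) {
			return username, secret, nil
		}),
	)

	return docker.NewResolver(docker.ResolverOptions{
		Hosts: docker.ConfigureDefaultRegistries(
			docker.WithAuthorizer(authorizer),
		),
	})
}

If the deprecated fields are set instead, NewResolver still honours them by building an equivalent RegistryHosts internally, so existing callers keep working.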
@ -108,13 +129,10 @@ func DefaultHost(ns string) (string, error) {
|
 }
 
 type dockerResolver struct {
-	auth      Authorizer
-	host      func(string) (string, error)
-	headers   http.Header
-	uagent    string
-	plainHTTP bool
-	client    *http.Client
-	tracker   StatusTracker
+	hosts         RegistryHosts
+	header        http.Header
+	resolveHeader http.Header
+	tracker       StatusTracker
 }
 
 // NewResolver returns a new resolver to a Docker registry
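
Note the split between header and resolveHeader in the new struct: the general headers go on every request, while the Accept list used to negotiate manifest media types is applied only to resolution requests. A small sketch of what NewResolver puts into that resolve header, using the same media-type constants referenced in this diff; resolveHeader here is a standalone helper written for illustration.

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// resolveHeader builds the Accept header the resolver sends when resolving a
// tag: all manifest/index media types it supports, plus a wildcard fallback.
func resolveHeader() http.Header {
	h := http.Header{}
	h.Set("Accept", strings.Join([]string{
		images.MediaTypeDockerSchema2Manifest,
		images.MediaTypeDockerSchema2ManifestList,
		ocispec.MediaTypeImageManifest,
		ocispec.MediaTypeImageIndex, "*"}, ", "))
	return h
}

func main() {
	fmt.Println(resolveHeader().Get("Accept"))
}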
@ -122,39 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver {
|
|||||||
if options.Tracker == nil {
|
if options.Tracker == nil {
|
||||||
options.Tracker = NewInMemoryTracker()
|
options.Tracker = NewInMemoryTracker()
|
||||||
}
|
}
|
||||||
if options.Host == nil {
|
|
||||||
options.Host = DefaultHost
|
|
||||||
}
|
|
||||||
if options.Headers == nil {
|
if options.Headers == nil {
|
||||||
options.Headers = make(http.Header)
|
options.Headers = make(http.Header)
|
||||||
}
|
}
|
||||||
|
if _, ok := options.Headers["User-Agent"]; !ok {
|
||||||
|
options.Headers.Set("User-Agent", "containerd/"+version.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
resolveHeader := http.Header{}
|
||||||
if _, ok := options.Headers["Accept"]; !ok {
|
if _, ok := options.Headers["Accept"]; !ok {
|
||||||
// set headers for all the types we support for resolution.
|
// set headers for all the types we support for resolution.
|
||||||
options.Headers.Set("Accept", strings.Join([]string{
|
resolveHeader.Set("Accept", strings.Join([]string{
|
||||||
images.MediaTypeDockerSchema2Manifest,
|
images.MediaTypeDockerSchema2Manifest,
|
||||||
images.MediaTypeDockerSchema2ManifestList,
|
images.MediaTypeDockerSchema2ManifestList,
|
||||||
ocispec.MediaTypeImageManifest,
|
ocispec.MediaTypeImageManifest,
|
||||||
ocispec.MediaTypeImageIndex, "*"}, ", "))
|
ocispec.MediaTypeImageIndex, "*"}, ", "))
|
||||||
}
|
|
||||||
ua := options.Headers.Get("User-Agent")
|
|
||||||
if ua != "" {
|
|
||||||
options.Headers.Del("User-Agent")
|
|
||||||
} else {
|
} else {
|
||||||
ua = "containerd/" + version.Version
|
resolveHeader["Accept"] = options.Headers["Accept"]
|
||||||
|
delete(options.Headers, "Accept")
|
||||||
}
|
}
|
||||||
|
|
||||||
if options.Authorizer == nil {
|
if options.Hosts == nil {
|
||||||
options.Authorizer = NewAuthorizer(options.Client, options.Credentials)
|
opts := []RegistryOpt{}
|
||||||
options.Authorizer.(*dockerAuthorizer).ua = ua
|
if options.Host != nil {
|
||||||
|
opts = append(opts, WithHostTranslator(options.Host))
|
||||||
|
}
|
||||||
|
|
||||||
|
if options.Authorizer == nil {
|
||||||
|
options.Authorizer = NewDockerAuthorizer(
|
||||||
|
WithAuthClient(options.Client),
|
||||||
|
WithAuthHeader(options.Headers),
|
||||||
|
WithAuthCreds(options.Credentials))
|
||||||
|
}
|
||||||
|
opts = append(opts, WithAuthorizer(options.Authorizer))
|
||||||
|
|
||||||
|
if options.Client != nil {
|
||||||
|
opts = append(opts, WithClient(options.Client))
|
||||||
|
}
|
||||||
|
if options.PlainHTTP {
|
||||||
|
opts = append(opts, WithPlainHTTP(MatchAllHosts))
|
||||||
|
} else {
|
||||||
|
opts = append(opts, WithPlainHTTP(MatchLocalhost))
|
||||||
|
}
|
||||||
|
options.Hosts = ConfigureDefaultRegistries(opts...)
|
||||||
}
|
}
|
||||||
return &dockerResolver{
|
return &dockerResolver{
|
||||||
auth: options.Authorizer,
|
hosts: options.Hosts,
|
||||||
host: options.Host,
|
header: options.Headers,
|
||||||
headers: options.Headers,
|
resolveHeader: resolveHeader,
|
||||||
uagent: ua,
|
tracker: options.Tracker,
|
||||||
plainHTTP: options.PlainHTTP,
|
|
||||||
client: options.Client,
|
|
||||||
tracker: options.Tracker,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -201,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
|||||||
return "", ocispec.Descriptor{}, err
|
return "", ocispec.Descriptor{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fetcher := dockerFetcher{
|
|
||||||
dockerBase: base,
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
urls []string
|
lastErr error
|
||||||
dgst = refspec.Digest()
|
paths [][]string
|
||||||
|
dgst = refspec.Digest()
|
||||||
|
caps = HostCapabilityPull
|
||||||
)
|
)
|
||||||
|
|
||||||
if dgst != "" {
|
if dgst != "" {
|
||||||
@ -218,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
|||||||
}
|
}
|
||||||
|
|
||||||
// turns out, we have a valid digest, make a url.
|
// turns out, we have a valid digest, make a url.
|
||||||
urls = append(urls, fetcher.url("manifests", dgst.String()))
|
paths = append(paths, []string{"manifests", dgst.String()})
|
||||||
|
|
||||||
// fallback to blobs on not found.
|
// fallback to blobs on not found.
|
||||||
urls = append(urls, fetcher.url("blobs", dgst.String()))
|
paths = append(paths, []string{"blobs", dgst.String()})
|
||||||
} else {
|
} else {
|
||||||
urls = append(urls, fetcher.url("manifests", refspec.Object))
|
// Add
|
||||||
|
paths = append(paths, []string{"manifests", refspec.Object})
|
||||||
|
caps |= HostCapabilityResolve
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts := base.filterHosts(caps)
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts")
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, err = contextWithRepositoryScope(ctx, refspec, false)
|
ctx, err = contextWithRepositoryScope(ctx, refspec, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", ocispec.Descriptor{}, err
|
return "", ocispec.Descriptor{}, err
|
||||||
}
|
}
|
||||||
for _, u := range urls {
|
|
||||||
req, err := http.NewRequest(http.MethodHead, u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header = r.headers
|
for _, u := range paths {
|
||||||
|
for _, host := range hosts {
|
||||||
|
ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
|
||||||
|
|
||||||
log.G(ctx).Debug("resolving")
|
req := base.request(host, http.MethodHead, u...)
|
||||||
resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
|
for key, value := range r.resolveHeader {
|
||||||
if err != nil {
|
req.header[key] = append(req.header[key], value...)
|
||||||
if errors.Cause(err) == ErrInvalidAuthorization {
|
|
||||||
err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
|
|
||||||
}
|
}
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
resp.Body.Close() // don't care about body contents.
|
|
||||||
|
|
||||||
if resp.StatusCode > 299 {
|
log.G(ctx).Debug("resolving")
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
resp, err := req.doWithRetries(ctx, nil)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Cause(err) == ErrInvalidAuthorization {
|
||||||
|
err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
|
||||||
|
}
|
||||||
|
return "", ocispec.Descriptor{}, err
|
||||||
|
}
|
||||||
|
resp.Body.Close() // don't care about body contents.
|
||||||
|
|
||||||
|
if resp.StatusCode > 299 {
|
||||||
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
|
||||||
|
}
|
||||||
|
size := resp.ContentLength
|
||||||
|
contentType := getManifestMediaType(resp)
|
||||||
|
|
||||||
|
// if no digest was provided, then only a resolve
|
||||||
|
// trusted registry was contacted, in this case use
|
||||||
|
// the digest header (or content from GET)
|
||||||
|
if dgst == "" {
|
||||||
|
// this is the only point at which we trust the registry. we use the
|
||||||
|
// content headers to assemble a descriptor for the name. when this becomes
|
||||||
|
// more robust, we mostly get this information from a secure trust store.
|
||||||
|
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
|
||||||
|
|
||||||
|
if dgstHeader != "" && size != -1 {
|
||||||
|
if err := dgstHeader.Validate(); err != nil {
|
||||||
|
return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
|
||||||
|
}
|
||||||
|
dgst = dgstHeader
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if dgst == "" || size == -1 {
|
||||||
|
log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
|
||||||
|
|
||||||
|
req = base.request(host, http.MethodGet, u...)
|
||||||
|
for key, value := range r.resolveHeader {
|
||||||
|
req.header[key] = append(req.header[key], value...)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := req.doWithRetries(ctx, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", ocispec.Descriptor{}, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
bodyReader := countingReader{reader: resp.Body}
|
||||||
|
|
||||||
|
contentType = getManifestMediaType(resp)
|
||||||
|
if dgst == "" {
|
||||||
|
if contentType == images.MediaTypeDockerSchema1Manifest {
|
||||||
|
b, err := schema1.ReadStripSignature(&bodyReader)
|
||||||
|
if err != nil {
|
||||||
|
return "", ocispec.Descriptor{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dgst = digest.FromBytes(b)
|
||||||
|
} else {
|
||||||
|
dgst, err = digest.FromReader(&bodyReader)
|
||||||
|
if err != nil {
|
||||||
|
return "", ocispec.Descriptor{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil {
|
||||||
|
return "", ocispec.Descriptor{}, err
|
||||||
|
}
|
||||||
|
size = bodyReader.bytesRead
|
||||||
|
}
|
||||||
|
// Prevent resolving to excessively large manifests
|
||||||
|
if size > MaxManifestSize {
|
||||||
|
if lastErr == nil {
|
||||||
|
lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
|
|
||||||
|
desc := ocispec.Descriptor{
|
||||||
|
Digest: dgst,
|
||||||
|
MediaType: contentType,
|
||||||
|
Size: size,
|
||||||
|
}
|
||||||
|
|
||||||
|
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
|
||||||
|
return ref, desc, nil
|
||||||
}
|
}
|
||||||
size := resp.ContentLength
|
|
||||||
|
|
||||||
// this is the only point at which we trust the registry. we use the
|
|
||||||
// content headers to assemble a descriptor for the name. when this becomes
|
|
||||||
// more robust, we mostly get this information from a secure trust store.
|
|
||||||
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
|
|
||||||
contentType := getManifestMediaType(resp)
|
|
||||||
|
|
||||||
if dgstHeader != "" && size != -1 {
|
|
||||||
if err := dgstHeader.Validate(); err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
|
|
||||||
}
|
|
||||||
dgst = dgstHeader
|
|
||||||
} else {
|
|
||||||
log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
req.Header = r.headers
|
|
||||||
|
|
||||||
resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
bodyReader := countingReader{reader: resp.Body}
|
|
||||||
|
|
||||||
contentType = getManifestMediaType(resp)
|
|
||||||
if contentType == images.MediaTypeDockerSchema1Manifest {
|
|
||||||
b, err := schema1.ReadStripSignature(&bodyReader)
|
|
||||||
if err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dgst = digest.FromBytes(b)
|
|
||||||
} else {
|
|
||||||
dgst, err = digest.FromReader(&bodyReader)
|
|
||||||
if err != nil {
|
|
||||||
return "", ocispec.Descriptor{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
size = bodyReader.bytesRead
|
|
||||||
}
|
|
||||||
|
|
||||||
desc := ocispec.Descriptor{
|
|
||||||
Digest: dgst,
|
|
||||||
MediaType: contentType,
|
|
||||||
Size: size,
|
|
||||||
}
|
|
||||||
|
|
||||||
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
|
|
||||||
return ref, desc, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref)
|
if lastErr == nil {
|
||||||
|
lastErr = errors.Wrap(errdefs.ErrNotFound, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", ocispec.Descriptor{}, lastErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
|
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
|
||||||
@ -336,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manifests can be pushed by digest like any other object, but the passed in
|
|
||||||
// reference cannot take a digest without the associated content. A tag is allowed
|
|
||||||
// and will be used to tag pushed manifests.
|
|
||||||
if refspec.Object != "" && strings.Contains(refspec.Object, "@") {
|
|
||||||
return nil, errors.New("cannot use digest reference for push locator")
|
|
||||||
}
|
|
||||||
|
|
||||||
base, err := r.base(refspec)
|
base, err := r.base(refspec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -350,62 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher
|
|||||||
|
|
||||||
return dockerPusher{
|
return dockerPusher{
|
||||||
dockerBase: base,
|
dockerBase: base,
|
||||||
tag: refspec.Object,
|
object: refspec.Object,
|
||||||
tracker: r.tracker,
|
tracker: r.tracker,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type dockerBase struct {
|
type dockerBase struct {
|
||||||
refspec reference.Spec
|
refspec reference.Spec
|
||||||
base url.URL
|
namespace string
|
||||||
uagent string
|
hosts []RegistryHost
|
||||||
|
header http.Header
|
||||||
client *http.Client
|
|
||||||
auth Authorizer
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
||||||
var (
|
|
||||||
err error
|
|
||||||
base url.URL
|
|
||||||
)
|
|
||||||
|
|
||||||
host := refspec.Hostname()
|
host := refspec.Hostname()
|
||||||
base.Host = host
|
hosts, err := r.hosts(host)
|
||||||
if r.host != nil {
|
if err != nil {
|
||||||
base.Host, err = r.host(host)
|
return nil, err
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
base.Scheme = "https"
|
|
||||||
if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") {
|
|
||||||
base.Scheme = "http"
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix := strings.TrimPrefix(refspec.Locator, host+"/")
|
|
||||||
base.Path = path.Join("/v2", prefix)
|
|
||||||
|
|
||||||
return &dockerBase{
|
return &dockerBase{
|
||||||
refspec: refspec,
|
refspec: refspec,
|
||||||
base: base,
|
namespace: strings.TrimPrefix(refspec.Locator, host+"/"),
|
||||||
uagent: r.uagent,
|
hosts: hosts,
|
||||||
client: r.client,
|
header: r.header,
|
||||||
auth: r.auth,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerBase) url(ps ...string) string {
|
func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) {
|
||||||
url := r.base
|
for _, host := range r.hosts {
|
||||||
url.Path = path.Join(url.Path, path.Join(ps...))
|
if host.Capabilities.Has(caps) {
|
||||||
return url.String()
|
hosts = append(hosts, host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error {
|
func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request {
|
||||||
|
header := http.Header{}
|
||||||
|
for key, value := range r.header {
|
||||||
|
header[key] = append(header[key], value...)
|
||||||
|
}
|
||||||
|
parts := append([]string{"/", host.Path, r.namespace}, ps...)
|
||||||
|
p := path.Join(parts...)
|
||||||
|
// Join strips trailing slash, re-add ending "/" if included
|
||||||
|
if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") {
|
||||||
|
p = p + "/"
|
||||||
|
}
|
||||||
|
return &request{
|
||||||
|
method: method,
|
||||||
|
path: p,
|
||||||
|
header: header,
|
||||||
|
host: host,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *request) authorize(ctx context.Context, req *http.Request) error {
|
||||||
// Check if has header for host
|
// Check if has header for host
|
||||||
if r.auth != nil {
|
if r.host.Authorizer != nil {
|
||||||
if err := r.auth.Authorize(ctx, req); err != nil {
|
if err := r.host.Authorizer.Authorize(ctx, req); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -413,81 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
type request struct {
|
||||||
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
|
method string
|
||||||
log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request")
|
path string
|
||||||
req.Header.Set("User-Agent", r.uagent)
|
header http.Header
|
||||||
|
host RegistryHost
|
||||||
|
body func() (io.ReadCloser, error)
|
||||||
|
size int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *request) do(ctx context.Context) (*http.Response, error) {
|
||||||
|
u := r.host.Scheme + "://" + r.host.Host + r.path
|
||||||
|
req, err := http.NewRequest(r.method, u, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header = r.header
|
||||||
|
if r.body != nil {
|
||||||
|
body, err := r.body()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Body = body
|
||||||
|
req.GetBody = r.body
|
||||||
|
if r.size > 0 {
|
||||||
|
req.ContentLength = r.size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
|
||||||
|
log.G(ctx).WithFields(requestFields(req)).Debug("do request")
|
||||||
if err := r.authorize(ctx, req); err != nil {
|
if err := r.authorize(ctx, req); err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to authorize")
|
return nil, errors.Wrap(err, "failed to authorize")
|
||||||
}
|
}
|
||||||
resp, err := ctxhttp.Do(ctx, r.client, req)
|
resp, err := ctxhttp.Do(ctx, r.host.Client, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to do request")
|
return nil, errors.Wrap(err, "failed to do request")
|
||||||
}
|
}
|
||||||
log.G(ctx).WithFields(logrus.Fields{
|
log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
|
||||||
"status": resp.Status,
|
|
||||||
"response.headers": resp.Header,
|
|
||||||
}).Debug("fetch response received")
|
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
|
func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
|
||||||
resp, err := r.doRequest(ctx, req)
|
resp, err := r.do(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
responses = append(responses, resp)
|
responses = append(responses, resp)
|
||||||
req, err = r.retryRequest(ctx, req, responses)
|
retry, err := r.retryRequest(ctx, responses)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if req != nil {
|
if retry {
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
return r.doRequestWithRetries(ctx, req, responses)
|
return r.doWithRetries(ctx, responses)
|
||||||
}
|
}
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
|
func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
|
||||||
if len(responses) > 5 {
|
if len(responses) > 5 {
|
||||||
return nil, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
last := responses[len(responses)-1]
|
last := responses[len(responses)-1]
|
||||||
if last.StatusCode == http.StatusUnauthorized {
|
switch last.StatusCode {
|
||||||
|
case http.StatusUnauthorized:
|
||||||
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
|
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
|
||||||
if r.auth != nil {
|
if r.host.Authorizer != nil {
|
||||||
if err := r.auth.AddResponses(ctx, responses); err == nil {
|
if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil {
|
||||||
return copyRequest(req)
|
return true, nil
|
||||||
} else if !errdefs.IsNotImplemented(err) {
|
} else if !errdefs.IsNotImplemented(err) {
|
||||||
return nil, err
|
return false, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, nil
|
return false, nil
|
||||||
} else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead {
|
case http.StatusMethodNotAllowed:
|
||||||
// Support registries which have not properly implemented the HEAD method for
|
// Support registries which have not properly implemented the HEAD method for
|
||||||
// manifests endpoint
|
// manifests endpoint
|
||||||
if strings.Contains(req.URL.Path, "/manifests/") {
|
if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") {
|
||||||
// TODO: copy request?
|
r.method = http.MethodGet
|
||||||
req.Method = http.MethodGet
|
return true, nil
|
||||||
return copyRequest(req)
|
|
||||||
}
|
}
|
||||||
|
case http.StatusRequestTimeout, http.StatusTooManyRequests:
|
||||||
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Handle 50x errors accounting for attempt history
|
// TODO: Handle 50x errors accounting for attempt history
|
||||||
return nil, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func copyRequest(req *http.Request) (*http.Request, error) {
|
func (r *request) String() string {
|
||||||
ireq := *req
|
return r.host.Scheme + "://" + r.host.Host + r.path
|
||||||
if ireq.GetBody != nil {
|
}
|
||||||
var err error
|
|
||||||
ireq.Body, err = ireq.GetBody()
|
func requestFields(req *http.Request) logrus.Fields {
|
||||||
if err != nil {
|
fields := map[string]interface{}{
|
||||||
return nil, err
|
"request.method": req.Method,
|
||||||
|
}
|
||||||
|
for k, vals := range req.Header {
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
if k == "authorization" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for i, v := range vals {
|
||||||
|
field := "request.header." + k
|
||||||
|
if i > 0 {
|
||||||
|
field = fmt.Sprintf("%s.%d", field, i)
|
||||||
|
}
|
||||||
|
fields[field] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return &ireq, nil
|
|
||||||
|
return logrus.Fields(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func responseFields(resp *http.Response) logrus.Fields {
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"response.status": resp.Status,
|
||||||
|
}
|
||||||
|
for k, vals := range resp.Header {
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
for i, v := range vals {
|
||||||
|
field := "response.header." + k
|
||||||
|
if i > 0 {
|
||||||
|
field = fmt.Sprintf("%s.%d", field, i)
|
||||||
|
}
|
||||||
|
fields[field] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return logrus.Fields(fields)
|
||||||
}
|
}
|
||||||
|
4 vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go (generated, vendored)
@ -216,12 +216,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De
 
 	ref := remotes.MakeRefKey(ctx, desc)
 	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest")
 	}
 
 	ref = remotes.MakeRefKey(ctx, config)
 	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config")
 	}
 
 	return desc, nil
45 vendor/github.com/containerd/containerd/remotes/docker/scope.go (generated, vendored)
@ -18,6 +18,7 @@ package docker
 
 import (
 	"context"
+	"fmt"
 	"net/url"
 	"sort"
 	"strings"
@@ -53,24 +54,38 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, pus
 	return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil
 }
 
-// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"].
-func getTokenScopes(ctx context.Context, params map[string]string) []string {
+// contextWithAppendPullRepositoryScope is used to append repository pull
+// scope into existing scopes indexed by the tokenScopesKey{}.
+func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
+	var scopes []string
+
+	if v := ctx.Value(tokenScopesKey{}); v != nil {
+		scopes = append(scopes, v.([]string)...)
+	}
+	scopes = append(scopes, fmt.Sprintf("repository:%s:pull", repo))
+	return context.WithValue(ctx, tokenScopesKey{}, scopes)
+}
+
+// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes.
+func getTokenScopes(ctx context.Context, common []string) []string {
 	var scopes []string
 	if x := ctx.Value(tokenScopesKey{}); x != nil {
 		scopes = append(scopes, x.([]string)...)
 	}
-	if scope, ok := params["scope"]; ok {
-		for _, s := range scopes {
-			// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
-			// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
-			if s == scope {
-				// already appended
-				goto Sort
-			}
-		}
-		scopes = append(scopes, scope)
-	}
-Sort:
+
+	scopes = append(scopes, common...)
+
 	sort.Strings(scopes)
-	return scopes
+
+	l := 0
+	for idx := 1; idx < len(scopes); idx++ {
+		// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
+		// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
+		if scopes[l] == scopes[idx] {
+			continue
+		}
+
+		l++
+		scopes[l] = scopes[idx]
+	}
+	return scopes[:l+1]
 }
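
The rewritten getTokenScopes drops the goto-based duplicate check in favour of sort-then-compact: sort the slice, then keep the first element of each run of equal neighbours in place. A self-contained sketch of the same idiom; dedupSorted is a hypothetical standalone helper and, unlike the vendored code, it also guards the empty-slice case.

package main

import (
	"fmt"
	"sort"
)

// dedupSorted sorts the slice and compacts equal neighbours in place,
// returning the de-duplicated prefix.
func dedupSorted(in []string) []string {
	if len(in) == 0 {
		return in
	}
	sort.Strings(in)
	l := 0
	for idx := 1; idx < len(in); idx++ {
		if in[l] == in[idx] {
			continue
		}
		l++
		in[l] = in[idx]
	}
	return in[:l+1]
}

func main() {
	scopes := []string{
		"repository:foo/bar:pull",
		"repository:foo/bar:pull",
		"repository:baz/qux:pull,push",
	}
	// Prints: [repository:baz/qux:pull,push repository:foo/bar:pull]
	fmt.Println(dedupSorted(scopes))
}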
|
55 vendor/github.com/containerd/containerd/remotes/handlers.go (generated, vendored)
@ -48,7 +48,8 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
 	case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
 		images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip,
 		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
-		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip:
+		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
+		images.MediaTypeDockerSchema2LayerEnc, images.MediaTypeDockerSchema2LayerGzipEnc:
 		return "layer-" + desc.Digest.String()
 	case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
 		return "config-" + desc.Digest.String()
@@ -156,7 +157,7 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc
 //
 // Base handlers can be provided which will be called before any push specific
 // handlers.
-func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
 	var m sync.Mutex
 	manifestStack := []ocispec.Descriptor{}
 
@@ -173,10 +174,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr
 		}
 	})
 
-	pushHandler := PushHandler(pusher, provider)
+	pushHandler := PushHandler(pusher, store)
+
+	platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)
+
+	annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store)
 
 	var handler images.Handler = images.Handlers(
-		images.FilterPlatforms(images.ChildrenHandler(provider), platform),
+		annotateHandler,
 		filterHandler,
 		pushHandler,
 	)
@@ -241,3 +246,45 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher)
 		return descs, nil
 	}
 }
+
+// annotateDistributionSourceHandler add distribution source label into
+// annotation of config or blob descriptor.
+func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// only add distribution source for the config or blob data descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		default:
+			return children, nil
+		}
+
+		for i := range children {
+			child := children[i]
+
+			info, err := manager.Info(ctx, child.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			for k, v := range info.Labels {
+				if !strings.HasPrefix(k, "containerd.io/distribution.source.") {
+					continue
+				}
+
+				if child.Annotations == nil {
+					child.Annotations = map[string]string{}
+				}
+				child.Annotations[k] = v
+			}
+
+			children[i] = child
+		}
+		return children, nil
+	}
+}
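
PushContent now requires a content.Store rather than a plain content.Provider because annotateDistributionSourceHandler reads the "containerd.io/distribution.source.*" labels from the store and copies them onto child descriptors, which is what lets the pusher pick a cross-repository mount source. A hedged caller-side sketch; pushImage is a hypothetical helper and the surrounding client wiring is assumed.

package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// pushImage passes the client's content store (a content.Store) so the new
// annotate handler can read distribution-source labels while walking children.
func pushImage(ctx context.Context, client *containerd.Client, ref string, desc ocispec.Descriptor) error {
	resolver := docker.NewResolver(docker.ResolverOptions{})
	pusher, err := resolver.Pusher(ctx, ref)
	if err != nil {
		return err
	}
	return remotes.PushContent(ctx, pusher, desc, client.ContentStore(), platforms.Default(), nil)
}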
26 vendor/github.com/containerd/containerd/rootfs/apply.go (generated, vendored)
@ -48,6 +48,14 @@ type Layer struct {
 // Layers are applied in order they are given, making the first layer the
 // bottom-most layer in the layer chain.
 func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {
+	return ApplyLayersWithOpts(ctx, layers, sn, a, nil)
+}
+
+// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts.
+// The returned result is a chain id digest representing all the applied layers.
+// Layers are applied in order they are given, making the first layer the
+// bottom-most layer in the layer chain.
+func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) {
 	chain := make([]digest.Digest, len(layers))
 	for i, layer := range layers {
 		chain[i] = layer.Diff.Digest
@@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter,
 			return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID)
 		}
 
-		if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) {
+		if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) {
 			return "", err
 		}
 	}
@@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter,
 // using the provided snapshotter and applier. If the layer was unpacked true
 // is returned, if the layer already exists false is returned.
 func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) {
+	return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil)
+}
+
+// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain,
+// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true
+// is returned, if the layer already exists false is returned.
+func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) {
 	var (
 		chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String()
 		applied bool
@@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
 			return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
 		}
 
-		if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil {
+		if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil {
 			if !errdefs.IsAlreadyExists(err) {
 				return false, err
 			}
@@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
 		}
 	}
 	return applied, nil
+
 }
 
-func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error {
+func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error {
 	var (
 		parent  = identity.ChainID(chain[:len(chain)-1])
 		chainID = identity.ChainID(chain)
@@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn
 	mounts, err = sn.Prepare(ctx, key, parent.String(), opts...)
 	if err != nil {
 		if errdefs.IsNotFound(err) && len(layers) > 1 {
-			if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil {
+			if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil {
 				if !errdefs.IsAlreadyExists(err) {
 					return err
 				}
@@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn
 		}
 	}()
 
-	diff, err = a.Apply(ctx, layer.Blob, mounts)
+	diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...)
 	if err != nil {
 		err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
 		return err
3
vendor/github.com/containerd/containerd/runtime/task.go
generated
vendored
3
vendor/github.com/containerd/containerd/runtime/task.go
generated
vendored
@ -33,6 +33,7 @@ type TaskInfo struct {
|
|||||||
|
|
||||||
// Process is a runtime object for an executing process inside a container
|
// Process is a runtime object for an executing process inside a container
|
||||||
type Process interface {
|
type Process interface {
|
||||||
|
// ID of the process
|
||||||
ID() string
|
ID() string
|
||||||
// State returns the process state
|
// State returns the process state
|
||||||
State(context.Context) (State, error)
|
State(context.Context) (State, error)
|
||||||
@ -54,6 +55,8 @@ type Process interface {
|
|||||||
type Task interface {
|
type Task interface {
|
||||||
Process
|
Process
|
||||||
|
|
||||||
|
// PID of the process
|
||||||
|
PID() uint32
|
||||||
// Namespace that the task exists in
|
// Namespace that the task exists in
|
||||||
Namespace() string
|
Namespace() string
|
||||||
// Pause pauses the container process
|
// Pause pauses the container process
|
||||||
|
10
vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
generated
vendored
10
vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
generated
vendored
@ -37,12 +37,12 @@ import (
|
|||||||
"github.com/containerd/containerd/metadata"
|
"github.com/containerd/containerd/metadata"
|
||||||
"github.com/containerd/containerd/mount"
|
"github.com/containerd/containerd/mount"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
|
"github.com/containerd/containerd/pkg/process"
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/containerd/containerd/plugin"
|
"github.com/containerd/containerd/plugin"
|
||||||
"github.com/containerd/containerd/runtime"
|
"github.com/containerd/containerd/runtime"
|
||||||
"github.com/containerd/containerd/runtime/linux/runctypes"
|
"github.com/containerd/containerd/runtime/linux/runctypes"
|
||||||
"github.com/containerd/containerd/runtime/v1"
|
v1 "github.com/containerd/containerd/runtime/v1"
|
||||||
"github.com/containerd/containerd/runtime/v1/linux/proc"
|
|
||||||
shim "github.com/containerd/containerd/runtime/v1/shim/v1"
|
shim "github.com/containerd/containerd/runtime/v1/shim/v1"
|
||||||
runc "github.com/containerd/go-runc"
|
runc "github.com/containerd/go-runc"
|
||||||
"github.com/containerd/typeurl"
|
"github.com/containerd/typeurl"
|
||||||
@ -335,7 +335,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
|
|||||||
filepath.Join(r.root, ns, id),
|
filepath.Join(r.root, ns, id),
|
||||||
)
|
)
|
||||||
ctx = namespaces.WithNamespace(ctx, ns)
|
ctx = namespaces.WithNamespace(ctx, ns)
|
||||||
pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile))
|
pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile))
|
||||||
shimExit := make(chan struct{})
|
shimExit := make(chan struct{})
|
||||||
s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() {
|
s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() {
|
||||||
defer close(shimExit)
|
defer close(shimExit)
|
||||||
@ -422,7 +422,7 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns,
|
|||||||
"namespace": ns,
|
"namespace": ns,
|
||||||
}).Warn("cleaning up after shim dead")
|
}).Warn("cleaning up after shim dead")
|
||||||
|
|
||||||
pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile))
|
pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, process.InitPidFile))
|
||||||
ctx = namespaces.WithNamespace(ctx, ns)
|
ctx = namespaces.WithNamespace(ctx, ns)
|
||||||
if err := r.terminate(ctx, bundle, ns, id); err != nil {
|
if err := r.terminate(ctx, bundle, ns, id); err != nil {
|
||||||
if r.config.ShimDebug {
|
if r.config.ShimDebug {
|
||||||
@ -487,7 +487,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
cmd = r.config.Runtime
|
cmd = r.config.Runtime
|
||||||
root = proc.RuncRoot
|
root = process.RuncRoot
|
||||||
)
|
)
|
||||||
if ropts != nil {
|
if ropts != nil {
|
||||||
if ropts.Runtime != "" {
|
if ropts.Runtime != "" {
|
||||||
|
13
vendor/github.com/containerd/containerd/runtime/v1/linux/task.go
generated
vendored
13
vendor/github.com/containerd/containerd/runtime/v1/linux/task.go
generated
vendored
@ -84,6 +84,11 @@ func (t *Task) Namespace() string {
|
|||||||
return t.namespace
|
return t.namespace
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PID of the task
|
||||||
|
func (t *Task) PID() uint32 {
|
||||||
|
return uint32(t.pid)
|
||||||
|
}
|
||||||
|
|
||||||
// Delete the task and return the exit status
|
// Delete the task and return the exit status
|
||||||
func (t *Task) Delete(ctx context.Context) (*runtime.Exit, error) {
|
func (t *Task) Delete(ctx context.Context) (*runtime.Exit, error) {
|
||||||
rsp, err := t.shim.Delete(ctx, empty)
|
rsp, err := t.shim.Delete(ctx, empty)
|
||||||
@ -124,11 +129,15 @@ func (t *Task) Start(ctx context.Context) error {
|
|||||||
t.pid = int(r.Pid)
|
t.pid = int(r.Pid)
|
||||||
if !hasCgroup {
|
if !hasCgroup {
|
||||||
cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid))
|
cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid))
|
||||||
if err != nil {
|
if err != nil && err != cgroups.ErrCgroupDeleted {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
t.mu.Lock()
|
t.mu.Lock()
|
||||||
t.cg = cg
|
if err == cgroups.ErrCgroupDeleted {
|
||||||
|
t.cg = nil
|
||||||
|
} else {
|
||||||
|
t.cg = cg
|
||||||
|
}
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
}
|
}
|
||||||
t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{
|
t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{
|
||||||
|
21
vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
generated
vendored
21
vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
generated
vendored
@ -127,8 +127,8 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
|
|||||||
"address": address,
|
"address": address,
|
||||||
}).Infof("shim placed in cgroup %s", cgroup)
|
}).Infof("shim placed in cgroup %s", cgroup)
|
||||||
}
|
}
|
||||||
if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil {
|
if err = setupOOMScore(cmd.Process.Pid); err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim")
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
c, clo, err := WithConnect(address, func() {})(ctx, config)
|
c, clo, err := WithConnect(address, func() {})(ctx, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -138,6 +138,21 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setupOOMScore gets containerd's oom score and adds +1 to it
|
||||||
|
// to ensure a shim has a lower* score than the daemons
|
||||||
|
func setupOOMScore(shimPid int) error {
|
||||||
|
pid := os.Getpid()
|
||||||
|
score, err := sys.GetOOMScoreAdj(pid)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "get daemon OOM score")
|
||||||
|
}
|
||||||
|
shimScore := score + 1
|
||||||
|
if err := sys.SetOOMScore(shimPid, shimScore); err != nil {
|
||||||
|
return errors.Wrap(err, "set shim OOM score")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) {
|
func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File, stdout, stderr io.Writer) (*exec.Cmd, error) {
|
||||||
selfExe, err := os.Executable()
|
selfExe, err := os.Executable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -283,7 +298,7 @@ func (c *Client) KillShim(ctx context.Context) error {
|
|||||||
return c.signalShim(ctx, unix.SIGKILL)
|
return c.signalShim(ctx, unix.SIGKILL)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the cient connection
|
// Close the client connection
|
||||||
func (c *Client) Close() error {
|
func (c *Client) Close() error {
|
||||||
if c.c == nil {
|
if c.c == nil {
|
||||||
return nil
|
return nil
|
||||||
|
44
vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
generated
vendored
44
vendor/github.com/containerd/containerd/runtime/v1/shim/service.go
generated
vendored
@ -35,10 +35,10 @@ import (
|
|||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
"github.com/containerd/containerd/mount"
|
"github.com/containerd/containerd/mount"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
|
"github.com/containerd/containerd/pkg/process"
|
||||||
|
"github.com/containerd/containerd/pkg/stdio"
|
||||||
"github.com/containerd/containerd/runtime"
|
"github.com/containerd/containerd/runtime"
|
||||||
"github.com/containerd/containerd/runtime/linux/runctypes"
|
"github.com/containerd/containerd/runtime/linux/runctypes"
|
||||||
rproc "github.com/containerd/containerd/runtime/proc"
|
|
||||||
"github.com/containerd/containerd/runtime/v1/linux/proc"
|
|
||||||
shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
|
shimapi "github.com/containerd/containerd/runtime/v1/shim/v1"
|
||||||
runc "github.com/containerd/go-runc"
|
runc "github.com/containerd/go-runc"
|
||||||
"github.com/containerd/typeurl"
|
"github.com/containerd/typeurl"
|
||||||
@ -84,7 +84,7 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) {
|
|||||||
s := &Service{
|
s := &Service{
|
||||||
config: config,
|
config: config,
|
||||||
context: ctx,
|
context: ctx,
|
||||||
processes: make(map[string]rproc.Process),
|
processes: make(map[string]process.Process),
|
||||||
events: make(chan interface{}, 128),
|
events: make(chan interface{}, 128),
|
||||||
ec: Default.Subscribe(),
|
ec: Default.Subscribe(),
|
||||||
}
|
}
|
||||||
@ -102,9 +102,9 @@ type Service struct {
|
|||||||
|
|
||||||
config Config
|
config Config
|
||||||
context context.Context
|
context context.Context
|
||||||
processes map[string]rproc.Process
|
processes map[string]process.Process
|
||||||
events chan interface{}
|
events chan interface{}
|
||||||
platform rproc.Platform
|
platform stdio.Platform
|
||||||
ec chan runc.Exit
|
ec chan runc.Exit
|
||||||
|
|
||||||
// Filled by Create()
|
// Filled by Create()
|
||||||
@ -114,9 +114,9 @@ type Service struct {
|
|||||||
|
|
||||||
// Create a new initial process and container with the underlying OCI runtime
|
// Create a new initial process and container with the underlying OCI runtime
|
||||||
func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) {
|
func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) {
|
||||||
var mounts []proc.Mount
|
var mounts []process.Mount
|
||||||
for _, m := range r.Rootfs {
|
for _, m := range r.Rootfs {
|
||||||
mounts = append(mounts, proc.Mount{
|
mounts = append(mounts, process.Mount{
|
||||||
Type: m.Type,
|
Type: m.Type,
|
||||||
Source: m.Source,
|
Source: m.Source,
|
||||||
Target: m.Target,
|
Target: m.Target,
|
||||||
@ -132,7 +132,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
config := &proc.CreateConfig{
|
config := &process.CreateConfig{
|
||||||
ID: r.ID,
|
ID: r.ID,
|
||||||
Bundle: r.Bundle,
|
Bundle: r.Bundle,
|
||||||
Runtime: r.Runtime,
|
Runtime: r.Runtime,
|
||||||
@ -266,7 +266,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty
|
|||||||
return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
|
return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
|
||||||
}
|
}
|
||||||
|
|
||||||
process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{
|
process, err := p.(*process.Init).Exec(ctx, s.config.Path, &process.ExecConfig{
|
||||||
ID: r.ID,
|
ID: r.ID,
|
||||||
Terminal: r.Terminal,
|
Terminal: r.Terminal,
|
||||||
Stdin: r.Stdin,
|
Stdin: r.Stdin,
|
||||||
@ -348,7 +348,7 @@ func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, er
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := p.(*proc.Init).Pause(ctx); err != nil {
|
if err := p.(*process.Init).Pause(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return empty, nil
|
return empty, nil
|
||||||
@ -360,7 +360,7 @@ func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, e
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := p.(*proc.Init).Resume(ctx); err != nil {
|
if err := p.(*process.Init).Resume(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return empty, nil
|
return empty, nil
|
||||||
@ -448,7 +448,7 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque
|
|||||||
}
|
}
|
||||||
options = *v.(*runctypes.CheckpointOptions)
|
options = *v.(*runctypes.CheckpointOptions)
|
||||||
}
|
}
|
||||||
if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{
|
if err := p.(*process.Init).Checkpoint(ctx, &process.CheckpointConfig{
|
||||||
Path: r.Path,
|
Path: r.Path,
|
||||||
Exit: options.Exit,
|
Exit: options.Exit,
|
||||||
AllowOpenTCP: options.OpenTcp,
|
AllowOpenTCP: options.OpenTcp,
|
||||||
@ -476,7 +476,7 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*pt
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil {
|
if err := p.(*process.Init).Update(ctx, r.Resources); err != nil {
|
||||||
return nil, errdefs.ToGRPC(err)
|
return nil, errdefs.ToGRPC(err)
|
||||||
}
|
}
|
||||||
return empty, nil
|
return empty, nil
|
||||||
@ -502,11 +502,11 @@ func (s *Service) processExits() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) allProcesses() []rproc.Process {
|
func (s *Service) allProcesses() []process.Process {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
res := make([]rproc.Process, 0, len(s.processes))
|
res := make([]process.Process, 0, len(s.processes))
|
||||||
for _, p := range s.processes {
|
for _, p := range s.processes {
|
||||||
res = append(res, p)
|
res = append(res, p)
|
||||||
}
|
}
|
||||||
@ -523,7 +523,7 @@ func (s *Service) checkProcesses(e runc.Exit) {
|
|||||||
if p.Pid() == e.Pid {
|
if p.Pid() == e.Pid {
|
||||||
|
|
||||||
if shouldKillAll {
|
if shouldKillAll {
|
||||||
if ip, ok := p.(*proc.Init); ok {
|
if ip, ok := p.(*process.Init); ok {
|
||||||
// Ensure all children are killed
|
// Ensure all children are killed
|
||||||
if err := ip.KillAll(s.context); err != nil {
|
if err := ip.KillAll(s.context); err != nil {
|
||||||
log.G(s.context).WithError(err).WithField("id", ip.ID()).
|
log.G(s.context).WithError(err).WithField("id", ip.ID()).
|
||||||
@ -569,7 +569,7 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ps, err := p.(*proc.Init).Runtime().Ps(ctx, id)
|
ps, err := p.(*process.Init).Runtime().Ps(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -589,7 +589,7 @@ func (s *Service) forward(publisher events.Publisher) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getInitProcess returns initial process
|
// getInitProcess returns initial process
|
||||||
func (s *Service) getInitProcess() (rproc.Process, error) {
|
func (s *Service) getInitProcess() (process.Process, error) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
@ -601,7 +601,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getExecProcess returns exec process
|
// getExecProcess returns exec process
|
||||||
func (s *Service) getExecProcess(id string) (rproc.Process, error) {
|
func (s *Service) getExecProcess(id string) (process.Process, error) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
@ -640,7 +640,7 @@ func getTopic(ctx context.Context, e interface{}) string {
|
|||||||
return runtime.TaskUnknownTopic
|
return runtime.TaskUnknownTopic
|
||||||
}
|
}
|
||||||
|
|
||||||
func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform rproc.Platform, r *proc.CreateConfig, rootfs string) (*proc.Init, error) {
|
func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform stdio.Platform, r *process.CreateConfig, rootfs string) (*process.Init, error) {
|
||||||
var options runctypes.CreateOptions
|
var options runctypes.CreateOptions
|
||||||
if r.Options != nil {
|
if r.Options != nil {
|
||||||
v, err := typeurl.UnmarshalAny(r.Options)
|
v, err := typeurl.UnmarshalAny(r.Options)
|
||||||
@ -650,8 +650,8 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace, criu st
|
|||||||
options = *v.(*runctypes.CreateOptions)
|
options = *v.(*runctypes.CreateOptions)
|
||||||
}
|
}
|
||||||
|
|
||||||
runtime := proc.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup)
|
runtime := process.NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup)
|
||||||
p := proc.New(r.ID, runtime, rproc.Stdio{
|
p := process.New(r.ID, runtime, stdio.Stdio{
|
||||||
Stdin: r.Stdin,
|
Stdin: r.Stdin,
|
||||||
Stdout: r.Stdout,
|
Stdout: r.Stdout,
|
||||||
Stderr: r.Stderr,
|
Stderr: r.Stderr,
|
||||||
|
2
vendor/github.com/containerd/containerd/runtime/v2/binary.go
generated
vendored
2
vendor/github.com/containerd/containerd/runtime/v2/binary.go
generated
vendored
@ -87,7 +87,7 @@ func (b *binary) Start(ctx context.Context, opts *types.Any, onClose func()) (_
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
if _, err := io.Copy(os.Stderr, f); err != nil {
|
if _, err := io.Copy(os.Stderr, f); err != nil {
|
||||||
// When using a multi-container shim the 2nd to Nth container in the
|
// When using a multi-container shim the 2nd to Nth container in the
|
||||||
// shim will not have a seperate log pipe. Ignore the failure log
|
// shim will not have a separate log pipe. Ignore the failure log
|
||||||
// message here when the shim connect times out.
|
// message here when the shim connect times out.
|
||||||
if !os.IsNotExist(errors.Cause(err)) {
|
if !os.IsNotExist(errors.Cause(err)) {
|
||||||
log.G(ctx).WithError(err).Error("copy shim log")
|
log.G(ctx).WithError(err).Error("copy shim log")
|
||||||
|
7
vendor/github.com/containerd/containerd/runtime/v2/manager.go
generated
vendored
7
vendor/github.com/containerd/containerd/runtime/v2/manager.go
generated
vendored
@ -140,6 +140,11 @@ func (m *TaskManager) Create(ctx context.Context, id string, opts runtime.Create
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
cleanupAfterDeadShim(context.Background(), id, ns, m.events, b)
|
cleanupAfterDeadShim(context.Background(), id, ns, m.events, b)
|
||||||
|
// Remove self from the runtime task list. Even though the cleanupAfterDeadShim()
|
||||||
|
// would publish taskExit event, but the shim.Delete() would always failed with ttrpc
|
||||||
|
// disconnect and there is no chance to remove this dead task from runtime task lists.
|
||||||
|
// Thus it's better to delete it here.
|
||||||
|
m.tasks.Delete(ctx, id)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -258,6 +263,8 @@ func (m *TaskManager) loadTasks(ctx context.Context) error {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
cleanupAfterDeadShim(context.Background(), id, ns, m.events, binaryCall)
|
cleanupAfterDeadShim(context.Background(), id, ns, m.events, binaryCall)
|
||||||
|
// Remove self from the runtime task list.
|
||||||
|
m.tasks.Delete(ctx, id)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cleanupAfterDeadShim(ctx, id, ns, m.events, binaryCall)
|
cleanupAfterDeadShim(ctx, id, ns, m.events, binaryCall)
|
||||||
|
10
vendor/github.com/containerd/containerd/runtime/v2/shim.go
generated
vendored
10
vendor/github.com/containerd/containerd/runtime/v2/shim.go
generated
vendored
@ -79,7 +79,7 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
if _, err := io.Copy(os.Stderr, f); err != nil {
|
if _, err := io.Copy(os.Stderr, f); err != nil {
|
||||||
// When using a multi-container shim the 2nd to Nth container in the
|
// When using a multi-container shim the 2nd to Nth container in the
|
||||||
// shim will not have a seperate log pipe. Ignore the failure log
|
// shim will not have a separate log pipe. Ignore the failure log
|
||||||
// message here when the shim connect times out.
|
// message here when the shim connect times out.
|
||||||
if !os.IsNotExist(errors.Cause(err)) {
|
if !os.IsNotExist(errors.Cause(err)) {
|
||||||
log.G(ctx).WithError(err).Error("copy shim log")
|
log.G(ctx).WithError(err).Error("copy shim log")
|
||||||
@ -100,6 +100,8 @@ func loadShim(ctx context.Context, bundle *Bundle, events *exchange.Exchange, rt
|
|||||||
events: events,
|
events: events,
|
||||||
rtTasks: rt,
|
rtTasks: rt,
|
||||||
}
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
if err := s.Connect(ctx); err != nil {
|
if err := s.Connect(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -193,6 +195,11 @@ func (s *shim) ID() string {
|
|||||||
return s.bundle.ID
|
return s.bundle.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PID of the task
|
||||||
|
func (s *shim) PID() uint32 {
|
||||||
|
return uint32(s.taskPid)
|
||||||
|
}
|
||||||
|
|
||||||
func (s *shim) Namespace() string {
|
func (s *shim) Namespace() string {
|
||||||
return s.bundle.Namespace
|
return s.bundle.Namespace
|
||||||
}
|
}
|
||||||
@ -214,6 +221,7 @@ func (s *shim) Delete(ctx context.Context) (*runtime.Exit, error) {
|
|||||||
if err := s.waitShutdown(ctx); err != nil {
|
if err := s.waitShutdown(ctx); err != nil {
|
||||||
log.G(ctx).WithError(err).Error("failed to shutdown shim")
|
log.G(ctx).WithError(err).Error("failed to shutdown shim")
|
||||||
}
|
}
|
||||||
|
s.Close()
|
||||||
if err := s.bundle.Delete(); err != nil {
|
if err := s.bundle.Delete(); err != nil {
|
||||||
log.G(ctx).WithError(err).Error("failed to delete bundle")
|
log.G(ctx).WithError(err).Error("failed to delete bundle")
|
||||||
}
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user