Move shim protos into linux pkg

This moves the shim's API and protos out of the containerd services
package and into the linux runtime package. This is because the shim is
an implementation detail of the linux runtime and is not a containerd
user-facing API.

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
This commit is contained in:
Michael Crosby
2017-06-22 14:29:49 -07:00
parent 6ad3ba739e
commit 990536f2cc
26 changed files with 1838 additions and 2266 deletions

91
linux/bundle.go Normal file
View File

@@ -0,0 +1,91 @@
// +build linux
package linux
import (
"bytes"
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
client "github.com/containerd/containerd/linux/shim"
)
func loadBundle(path, namespace string) *bundle {
return &bundle{
path: path,
namespace: namespace,
}
}
// newBundle creates a new bundle on disk at the provided path for the given id
func newBundle(path, namespace, id string, spec []byte) (b *bundle, err error) {
if err := os.MkdirAll(path, 0700); err != nil {
return nil, err
}
path = filepath.Join(path, id)
defer func() {
if err != nil {
os.RemoveAll(path)
}
}()
if err := os.Mkdir(path, 0700); err != nil {
return nil, err
}
if err := os.Mkdir(filepath.Join(path, "rootfs"), 0700); err != nil {
return nil, err
}
f, err := os.Create(filepath.Join(path, configFilename))
if err != nil {
return nil, err
}
defer f.Close()
_, err = io.Copy(f, bytes.NewReader(spec))
return &bundle{
path: path,
namespace: namespace,
}, err
}
type bundle struct {
path string
namespace string
}
// NewShim connects to the shim managing the bundle and tasks
func (b *bundle) NewShim(ctx context.Context, binary string, remote bool) (*client.Client, error) {
opt := client.WithStart(binary)
if !remote {
opt = client.WithLocal
}
return client.New(ctx, client.Config{
Address: filepath.Join(b.path, "shim.sock"),
Path: b.path,
Namespace: b.namespace,
}, opt)
}
// Connect reconnects to an existing shim
func (b *bundle) Connect(ctx context.Context, remote bool) (*client.Client, error) {
opt := client.WithConnect
if !remote {
opt = client.WithLocal
}
return client.New(ctx, client.Config{
Address: filepath.Join(b.path, "shim.sock"),
Path: b.path,
Namespace: b.namespace,
}, opt)
}
// spec returns the spec written to the bundle
func (b *bundle) Spec() ([]byte, error) {
return ioutil.ReadFile(filepath.Join(b.path, configFilename))
}
// Delete deletes the bundle from disk
func (b *bundle) Delete() error {
return os.RemoveAll(b.path)
}

75
linux/list.go Normal file
View File

@@ -0,0 +1,75 @@
// +build linux
package linux
import (
"context"
"sync"
"github.com/containerd/containerd/namespaces"
)
func newTaskList() *taskList {
return &taskList{
tasks: make(map[string]map[string]*Task),
}
}
type taskList struct {
mu sync.Mutex
tasks map[string]map[string]*Task
}
func (l *taskList) get(ctx context.Context, id string) (*Task, error) {
l.mu.Lock()
defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
tasks, ok := l.tasks[namespace]
if !ok {
return nil, ErrTaskNotExists
}
t, ok := tasks[id]
if !ok {
return nil, ErrTaskNotExists
}
return t, nil
}
func (l *taskList) add(ctx context.Context, t *Task) error {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return err
}
return l.addWithNamespace(namespace, t)
}
func (l *taskList) addWithNamespace(namespace string, t *Task) error {
l.mu.Lock()
defer l.mu.Unlock()
id := t.containerID
if _, ok := l.tasks[namespace]; !ok {
l.tasks[namespace] = make(map[string]*Task)
}
if _, ok := l.tasks[namespace][id]; ok {
return ErrTaskAlreadyExists
}
l.tasks[namespace][id] = t
return nil
}
func (l *taskList) delete(ctx context.Context, t *Task) {
l.mu.Lock()
defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return
}
tasks, ok := l.tasks[namespace]
if ok {
delete(tasks, t.containerID)
}
}

View File

@@ -3,29 +3,29 @@
package linux
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"google.golang.org/grpc"
"github.com/boltdb/bolt"
eventsapi "github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/api/services/shim/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/events"
shimb "github.com/containerd/containerd/linux/shim"
client "github.com/containerd/containerd/linux/shim"
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/plugin"
runc "github.com/containerd/go-runc"
google_protobuf "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
@@ -35,6 +35,7 @@ var (
ErrTaskNotExists = errors.New("task does not exist")
ErrTaskAlreadyExists = errors.New("task already exists")
pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux")
empty = &google_protobuf.Empty{}
)
const (
@@ -50,6 +51,7 @@ func init() {
Init: New,
Requires: []plugin.PluginType{
plugin.TaskMonitorPlugin,
plugin.MetadataPlugin,
},
Config: &Config{
Shim: defaultShim,
@@ -69,71 +71,6 @@ type Config struct {
NoShim bool `toml:"no_shim,omitempty"`
}
func newTaskList() *taskList {
return &taskList{
tasks: make(map[string]map[string]*Task),
}
}
type taskList struct {
mu sync.Mutex
tasks map[string]map[string]*Task
}
func (l *taskList) get(ctx context.Context, id string) (*Task, error) {
l.mu.Lock()
defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
tasks, ok := l.tasks[namespace]
if !ok {
return nil, ErrTaskNotExists
}
t, ok := tasks[id]
if !ok {
return nil, ErrTaskNotExists
}
return t, nil
}
func (l *taskList) add(ctx context.Context, t *Task) error {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return err
}
return l.addWithNamespace(namespace, t)
}
func (l *taskList) addWithNamespace(namespace string, t *Task) error {
l.mu.Lock()
defer l.mu.Unlock()
id := t.containerID
if _, ok := l.tasks[namespace]; !ok {
l.tasks[namespace] = make(map[string]*Task)
}
if _, ok := l.tasks[namespace][id]; ok {
return ErrTaskAlreadyExists
}
l.tasks[namespace][id] = t
return nil
}
func (l *taskList) delete(ctx context.Context, t *Task) {
l.mu.Lock()
defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return
}
tasks, ok := l.tasks[namespace]
if ok {
delete(tasks, t.containerID)
}
}
func New(ic *plugin.InitContext) (interface{}, error) {
if err := os.MkdirAll(ic.Root, 0700); err != nil {
return nil, err
@@ -142,6 +79,10 @@ func New(ic *plugin.InitContext) (interface{}, error) {
if err != nil {
return nil, err
}
m, err := ic.Get(plugin.MetadataPlugin)
if err != nil {
return nil, err
}
cfg := ic.Config.(*Config)
c, cancel := context.WithCancel(ic.Context)
r := &Runtime{
@@ -155,10 +96,11 @@ func New(ic *plugin.InitContext) (interface{}, error) {
monitor: monitor.(plugin.TaskMonitor),
tasks: newTaskList(),
emitter: events.GetPoster(ic.Context),
db: m.(*bolt.DB),
}
// set the events output for a monitor if it generates events
r.monitor.Events(r.events)
tasks, err := r.loadAllTasks(ic.Context)
tasks, err := r.restoreTasks(ic.Context)
if err != nil {
return nil, err
}
@@ -166,6 +108,9 @@ func New(ic *plugin.InitContext) (interface{}, error) {
if err := r.tasks.addWithNamespace(t.namespace, t); err != nil {
return nil, err
}
if err := r.handleEvents(ic.Context, t.shim); err != nil {
return nil, err
}
}
return r, nil
}
@@ -182,39 +127,44 @@ type Runtime struct {
monitor plugin.TaskMonitor
tasks *taskList
emitter events.Poster
db *bolt.DB
}
func (r *Runtime) ID() string {
return pluginID
}
func (r *Runtime) Create(ctx context.Context, id string, opts plugin.CreateOpts) (t plugin.Task, err error) {
func (r *Runtime) Create(ctx context.Context, id string, opts plugin.CreateOpts) (_ plugin.Task, err error) {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
path, err := r.newBundle(namespace, id, opts.Spec)
bundle, err := newBundle(filepath.Join(r.root, namespace), namespace, id, opts.Spec)
if err != nil {
return nil, err
}
s, err := newShim(ctx, r.shim, path, namespace, r.remote)
if err != nil {
os.RemoveAll(path)
return nil, err
}
// Exit the shim on error
defer func() {
if err != nil {
s.Exit(context.Background(), &shim.ExitRequest{})
bundle.Delete()
}
}()
s, err := bundle.NewShim(ctx, r.shim, r.remote)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if kerr := s.KillShim(ctx); kerr != nil {
log.G(ctx).WithError(err).Error("failed to kill shim")
}
}
}()
if err = r.handleEvents(ctx, s); err != nil {
os.RemoveAll(path)
return nil, err
}
sopts := &shim.CreateRequest{
sopts := &shim.CreateTaskRequest{
ID: id,
Bundle: path,
Bundle: bundle.path,
Runtime: r.runtime,
Stdin: opts.IO.Stdin,
Stdout: opts.IO.Stdout,
@@ -230,15 +180,14 @@ func (r *Runtime) Create(ctx context.Context, id string, opts plugin.CreateOpts)
})
}
if _, err = s.Create(ctx, sopts); err != nil {
os.RemoveAll(path)
return nil, errors.New(grpc.ErrorDesc(err))
}
c := newTask(id, namespace, opts.Spec, s)
if err := r.tasks.add(ctx, c); err != nil {
t := newTask(id, namespace, opts.Spec, s)
if err := r.tasks.add(ctx, t); err != nil {
return nil, err
}
// after the task is created, add it to the monitor
if err = r.monitor.Monitor(c); err != nil {
if err = r.monitor.Monitor(t); err != nil {
return nil, err
}
@@ -252,7 +201,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts plugin.CreateOpts)
}
if err := r.emit(ctx, "/runtime/create", &eventsapi.RuntimeCreate{
ID: id,
Bundle: path,
Bundle: bundle.path,
RootFS: runtimeMounts,
IO: &eventsapi.RuntimeIO{
Stdin: opts.IO.Stdin,
@@ -264,8 +213,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts plugin.CreateOpts)
}); err != nil {
return nil, err
}
return c, nil
return t, nil
}
func (r *Runtime) Delete(ctx context.Context, c plugin.Task) (*plugin.Exit, error) {
@@ -275,22 +223,25 @@ func (r *Runtime) Delete(ctx context.Context, c plugin.Task) (*plugin.Exit, erro
}
lc, ok := c.(*Task)
if !ok {
return nil, fmt.Errorf("container cannot be cast as *linux.Container")
return nil, fmt.Errorf("task cannot be cast as *linux.Task")
}
// remove the container from the monitor
if err := r.monitor.Stop(lc); err != nil {
// TODO: log error here
return nil, err
}
rsp, err := lc.shim.Delete(ctx, &shim.DeleteRequest{})
rsp, err := lc.shim.Delete(ctx, empty)
if err != nil {
return nil, errors.New(grpc.ErrorDesc(err))
}
lc.shim.Exit(ctx, &shim.ExitRequest{})
if err := lc.shim.KillShim(ctx); err != nil {
log.G(ctx).WithError(err).Error("failed to kill shim")
}
r.tasks.delete(ctx, lc)
i := c.Info()
if err := r.emit(ctx, "/runtime/delete", &eventsapi.RuntimeDelete{
var (
bundle = loadBundle(filepath.Join(r.root, namespace, lc.containerID), namespace)
i = c.Info()
)
if err := r.emit(ctx, "/runtime/delete", eventsapi.RuntimeDelete{
ID: i.ID,
Runtime: i.Runtime,
ExitStatus: rsp.ExitStatus,
@@ -302,7 +253,7 @@ func (r *Runtime) Delete(ctx context.Context, c plugin.Task) (*plugin.Exit, erro
Status: rsp.ExitStatus,
Timestamp: rsp.ExitedAt,
Pid: rsp.Pid,
}, r.deleteBundle(namespace, lc.containerID)
}, bundle.Delete()
}
func (r *Runtime) Tasks(ctx context.Context) ([]plugin.Task, error) {
@@ -321,17 +272,19 @@ func (r *Runtime) Tasks(ctx context.Context) ([]plugin.Task, error) {
return o, nil
}
func (r *Runtime) loadAllTasks(ctx context.Context) ([]*Task, error) {
func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) {
dir, err := ioutil.ReadDir(r.root)
if err != nil {
return nil, err
}
var o []*Task
for _, fi := range dir {
if !fi.IsDir() {
for _, namespace := range dir {
if !namespace.IsDir() {
continue
}
tasks, err := r.loadTasks(ctx, fi.Name())
name := namespace.Name()
log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace")
tasks, err := r.loadTasks(ctx, name)
if err != nil {
return nil, err
}
@@ -350,28 +303,41 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
return nil, err
}
var o []*Task
for _, fi := range dir {
if !fi.IsDir() {
for _, path := range dir {
if !path.IsDir() {
continue
}
id := fi.Name()
// TODO: optimize this if it is called frequently to list all containers,
// i.e. don't reconnect to the shim every time
c, err := r.loadTask(ctx, ns, filepath.Join(r.root, ns, id))
id := path.Name()
bundle := loadBundle(filepath.Join(r.root, ns, id), ns)
s, err := bundle.Connect(ctx, r.remote)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to load container %s/%s", ns, id)
// if we fail to load the container, connect to the shim, make sure if the shim has
// been killed and cleanup the resources still being held by the container
r.killContainer(ctx, ns, id)
log.G(ctx).WithError(err).Error("connecting to shim")
if err := r.terminate(ctx, bundle, ns, id); err != nil {
log.G(ctx).WithError(err).WithField("bundle", bundle.path).Error("failed to terminate task, leaving bundle for debugging")
continue
}
if err := bundle.Delete(); err != nil {
log.G(ctx).WithError(err).Error("delete bundle")
}
continue
}
o = append(o, c)
spec, err := bundle.Spec()
if err != nil {
log.G(ctx).WithError(err).Error("load task spec")
}
o = append(o, &Task{
containerID: id,
shim: s,
spec: spec,
namespace: ns,
})
}
return o, nil
}
func (r *Runtime) handleEvents(ctx context.Context, s shim.ShimClient) error {
events, err := s.Events(r.eventsContext, &shim.EventsRequest{})
func (r *Runtime) handleEvents(ctx context.Context, s *client.Client) error {
events, err := s.Stream(r.eventsContext, &shim.StreamEventsRequest{})
if err != nil {
return err
}
@@ -379,7 +345,7 @@ func (r *Runtime) handleEvents(ctx context.Context, s shim.ShimClient) error {
return nil
}
func (r *Runtime) forward(ctx context.Context, events shim.Shim_EventsClient) {
func (r *Runtime) forward(ctx context.Context, events shim.Shim_StreamClient) {
for {
e, err := events.Recv()
if err != nil {
@@ -391,19 +357,19 @@ func (r *Runtime) forward(ctx context.Context, events shim.Shim_EventsClient) {
topic := ""
var et plugin.EventType
switch e.Type {
case task.Event_CREATE:
case shim.Event_CREATE:
topic = "task-create"
et = plugin.CreateEvent
case task.Event_START:
case shim.Event_START:
topic = "task-start"
et = plugin.StartEvent
case task.Event_EXEC_ADDED:
case shim.Event_EXEC_ADDED:
topic = "task-execadded"
et = plugin.ExecAddEvent
case task.Event_OOM:
case shim.Event_OOM:
topic = "task-oom"
et = plugin.OOMEvent
case task.Event_EXIT:
case shim.Event_EXIT:
topic = "task-exit"
et = plugin.ExitEvent
}
@@ -418,7 +384,7 @@ func (r *Runtime) forward(ctx context.Context, events shim.Shim_EventsClient) {
}
if err := r.emit(ctx, "/runtime/"+topic, &eventsapi.RuntimeEvent{
ID: e.ID,
Type: e.Type,
Type: eventsapi.RuntimeEvent_EventType(e.Type),
Pid: e.Pid,
ExitStatus: e.ExitStatus,
ExitedAt: e.ExitedAt,
@@ -428,89 +394,51 @@ func (r *Runtime) forward(ctx context.Context, events shim.Shim_EventsClient) {
}
}
func (r *Runtime) newBundle(namespace, id string, spec []byte) (string, error) {
path := filepath.Join(r.root, namespace)
if err := os.MkdirAll(path, 0700); err != nil {
return "", err
}
path = filepath.Join(path, id)
if err := os.Mkdir(path, 0700); err != nil {
return "", err
}
if err := os.Mkdir(filepath.Join(path, "rootfs"), 0700); err != nil {
return "", err
}
f, err := os.Create(filepath.Join(path, configFilename))
func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string) error {
ctx = namespaces.WithNamespace(ctx, ns)
rt, err := r.getRuntime(ctx, ns, id)
if err != nil {
return "", err
return err
}
defer f.Close()
_, err = io.Copy(f, bytes.NewReader(spec))
return path, err
}
func (r *Runtime) deleteBundle(namespace, id string) error {
return os.RemoveAll(filepath.Join(r.root, namespace, id))
}
func (r *Runtime) loadTask(ctx context.Context, namespace, path string) (*Task, error) {
id := filepath.Base(path)
s, err := loadShim(path, namespace, r.remote)
if err != nil {
return nil, err
}
if err = r.handleEvents(ctx, s); err != nil {
return nil, err
}
data, err := ioutil.ReadFile(filepath.Join(path, configFilename))
if err != nil {
return nil, err
}
return &Task{
containerID: id,
shim: s,
spec: data,
namespace: namespace,
}, nil
}
// killContainer is used whenever the runtime fails to connect to a shim (it died)
// and needs to cleanup the container resources in the underlying runtime (runc, etc...)
func (r *Runtime) killContainer(ctx context.Context, ns, id string) {
log.G(ctx).Debug("terminating container after failed load")
runtime := &runc.Runc{
// TODO: should we get Command provided for initial container creation?
Command: r.runtime,
LogFormat: runc.JSON,
PdeathSignal: unix.SIGKILL,
Root: filepath.Join(shimb.RuncRoot, ns),
}
if err := runtime.Kill(ctx, id, int(unix.SIGKILL), &runc.KillOpts{
All: true,
}); err != nil {
if err := rt.Kill(ctx, id, int(unix.SIGKILL), &runc.KillOpts{All: true}); err != nil {
log.G(ctx).WithError(err).Warnf("kill all processes for %s", id)
}
// it can take a while for the container to be killed so poll for the container's status
// until it is in a stopped state
status := "running"
for status != "stopped" {
c, err := runtime.State(ctx, id)
c, err := rt.State(ctx, id)
if err != nil {
break
}
status = c.Status
time.Sleep(10 * time.Millisecond)
time.Sleep(50 * time.Millisecond)
}
if err := runtime.Delete(ctx, id); err != nil {
log.G(ctx).WithError(err).Warnf("delete container %s", id)
if err := rt.Delete(ctx, id); err != nil {
log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
}
// try to unmount the rootfs in case it was not owned by an external mount namespace
unix.Unmount(filepath.Join(r.root, ns, id, "rootfs"), 0)
// remove container bundle
if err := r.deleteBundle(ns, id); err != nil {
log.G(ctx).WithError(err).Warnf("delete container bundle %s", id)
if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
log.G(ctx).WithError(err).Warnf("unmount task rootfs %s", id)
}
return nil
}
func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, error) {
var c containers.Container
if err := r.db.View(func(tx *bolt.Tx) error {
store := metadata.NewContainerStore(tx)
var err error
c, err = store.Get(ctx, id)
return err
}); err != nil {
return nil, err
}
return &runc.Runc{
Command: c.Runtime.Name,
LogFormat: runc.JSON,
PdeathSignal: unix.SIGKILL,
Root: filepath.Join(client.RuncRoot, ns),
}, nil
}
func (r *Runtime) emit(ctx context.Context, topic string, evt interface{}) error {

View File

@@ -1,90 +0,0 @@
// +build linux
package linux
import (
"context"
"fmt"
"net"
"os/exec"
"path/filepath"
"syscall"
"time"
"google.golang.org/grpc"
"github.com/containerd/containerd/api/services/shim/v1"
localShim "github.com/containerd/containerd/linux/shim"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/reaper"
"github.com/containerd/containerd/sys"
"github.com/pkg/errors"
)
func newShim(ctx context.Context, shimName string, path, namespace string, remote bool) (shim.ShimClient, error) {
if !remote {
return localShim.Client(path, namespace)
}
socket := filepath.Join(path, "shim.sock")
l, err := sys.CreateUnixSocket(socket)
if err != nil {
return nil, err
}
cmd := exec.Command(shimName, "--namespace", namespace)
cmd.Dir = path
f, err := l.(*net.UnixListener).File()
if err != nil {
return nil, err
}
// close our side of the socket, do not close the listener as it will
// remove the socket from disk
defer f.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
// make sure the shim can be re-parented to system init
// and is cloned in a new mount namespace because the overlay/filesystems
// will be mounted by the shim
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWNS,
Setpgid: true,
}
if err := reaper.Default.Start(cmd); err != nil {
return nil, errors.Wrapf(err, "failed to start shim")
}
defer func() {
if err != nil {
cmd.Process.Kill()
reaper.Default.Wait(cmd)
} else {
log.G(ctx).WithField("socket", socket).Infof("new shim started")
}
}()
if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil {
return nil, errors.Wrap(err, "failed to set OOM Score on shim")
}
return connectShim(socket)
}
func loadShim(path, namespace string, remote bool) (shim.ShimClient, error) {
if !remote {
return localShim.Client(path, namespace)
}
socket := filepath.Join(path, "shim.sock")
return connectShim(socket)
}
func connectShim(socket string) (shim.ShimClient, error) {
// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(100 * time.Second)}
dialOpts = append(dialOpts,
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", socket, timeout)
}),
grpc.WithBlock(),
grpc.WithTimeout(2*time.Second),
)
conn, err := grpc.Dial(fmt.Sprintf("unix://%s", socket), dialOpts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to shim via \"%s\"", fmt.Sprintf("unix://%s", socket))
}
return shim.NewShimClient(conn), nil
}

View File

@@ -1,150 +1,181 @@
// +build !windows
// +build linux
package shim
import (
"path/filepath"
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
"syscall"
"time"
shimapi "github.com/containerd/containerd/api/services/shim/v1"
"github.com/containerd/containerd/api/types/task"
runc "github.com/containerd/go-runc"
google_protobuf "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/reaper"
"github.com/containerd/containerd/sys"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
func Client(path, namespace string) (shimapi.ShimClient, error) {
pid, err := runc.ReadPidFile(filepath.Join(path, "init.pid"))
type ClientOpt func(context.Context, Config) (shim.ShimClient, io.Closer, error)
// WithStart executes a new shim process
func WithStart(binary string) ClientOpt {
return func(ctx context.Context, config Config) (shim.ShimClient, io.Closer, error) {
socket, err := newSocket(config)
if err != nil {
return nil, nil, err
}
// close our side of the socket, do not close the listener as it will
// remove the socket from disk
defer socket.Close()
cmd := newCommand(binary, config, socket)
if err := reaper.Default.Start(cmd); err != nil {
return nil, nil, errors.Wrapf(err, "failed to start shim")
}
log.G(ctx).WithFields(logrus.Fields{
"pid": cmd.Process.Pid,
"address": config.Address,
}).Infof("shim %s started", binary)
if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil {
return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim")
}
return WithConnect(ctx, config)
}
}
func newCommand(binary string, config Config, socket *os.File) *exec.Cmd {
args := []string{
"--namespace", config.Namespace,
}
if config.Debug {
args = append(args, "--debug")
}
cmd := exec.Command(binary, args...)
cmd.Dir = config.Path
// make sure the shim can be re-parented to system init
// and is cloned in a new mount namespace because the overlay/filesystems
// will be mounted by the shim
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWNS,
Setpgid: true,
}
cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
return cmd
}
func newSocket(config Config) (*os.File, error) {
l, err := sys.CreateUnixSocket(config.Address)
if err != nil {
return nil, err
}
return l.(*net.UnixListener).File()
}
s, err := New(path, namespace)
func connect(address string) (*grpc.ClientConn, error) {
gopts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithTimeout(100 * time.Second),
grpc.WithDialer(dialer),
grpc.FailOnNonTempDialError(true),
}
conn, err := grpc.Dial(dialAddress(address), gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", address)
}
return conn, nil
}
func dialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
return net.DialTimeout("unix", address, timeout)
}
func dialAddress(address string) string {
return fmt.Sprintf("unix://%s", address)
}
// WithConnect connects to an existing shim
func WithConnect(ctx context.Context, config Config) (shim.ShimClient, io.Closer, error) {
conn, err := connect(config.Address)
if err != nil {
return nil, nil, err
}
return shim.NewShimClient(conn), conn, nil
}
// WithLocal uses an in process shim
func WithLocal(ctx context.Context, config Config) (shim.ShimClient, io.Closer, error) {
service, err := NewService(config.Path, config.Namespace)
if err != nil {
return nil, nil, err
}
return NewLocal(service), nil, nil
}
type Config struct {
Address string
Path string
Namespace string
Debug bool
}
// New returns a new shim client
func New(ctx context.Context, config Config, opt ClientOpt) (*Client, error) {
s, c, err := opt(ctx, config)
if err != nil {
return nil, err
}
cl := &client{
s: s,
}
// used when quering container status and info
cl.s.initProcess = &initProcess{
id: filepath.Base(path),
pid: pid,
runc: &runc.Runc{
Log: filepath.Join(path, "log.json"),
LogFormat: runc.JSON,
PdeathSignal: syscall.SIGKILL,
Root: filepath.Join(RuncRoot, namespace),
},
}
return cl, nil
}
type client struct {
s *Service
}
func (c *client) Create(ctx context.Context, in *shimapi.CreateRequest, opts ...grpc.CallOption) (*shimapi.CreateResponse, error) {
return c.s.Create(ctx, in)
}
func (c *client) Start(ctx context.Context, in *shimapi.StartRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Start(ctx, in)
}
func (c *client) Delete(ctx context.Context, in *shimapi.DeleteRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
return c.s.Delete(ctx, in)
}
func (c *client) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
return c.s.DeleteProcess(ctx, in)
}
func (c *client) Exec(ctx context.Context, in *shimapi.ExecRequest, opts ...grpc.CallOption) (*shimapi.ExecResponse, error) {
return c.s.Exec(ctx, in)
}
func (c *client) Pty(ctx context.Context, in *shimapi.PtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Pty(ctx, in)
}
func (c *client) Events(ctx context.Context, in *shimapi.EventsRequest, opts ...grpc.CallOption) (shimapi.Shim_EventsClient, error) {
return &events{
c: c.s.events,
ctx: ctx,
return &Client{
ShimClient: s,
c: c,
}, nil
}
func (c *client) State(ctx context.Context, in *shimapi.StateRequest, opts ...grpc.CallOption) (*shimapi.StateResponse, error) {
return c.s.State(ctx, in)
type Client struct {
shim.ShimClient
c io.Closer
}
func (c *client) Pause(ctx context.Context, in *shimapi.PauseRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Pause(ctx, in)
}
func (c *client) Resume(ctx context.Context, in *shimapi.ResumeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Resume(ctx, in)
}
func (c *client) Kill(ctx context.Context, in *shimapi.KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Kill(ctx, in)
}
func (c *client) Processes(ctx context.Context, in *shimapi.ProcessesRequest, opts ...grpc.CallOption) (*shimapi.ProcessesResponse, error) {
return c.s.Processes(ctx, in)
}
func (c *client) Exit(ctx context.Context, in *shimapi.ExitRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
// don't exit the calling process for the client
// but make sure we unmount the containers rootfs for this client
if err := unix.Unmount(filepath.Join(c.s.path, "rootfs"), 0); err != nil {
return nil, err
func (c *Client) IsAlive(ctx context.Context) (bool, error) {
_, err := c.ShimInfo(ctx, empty)
if err != nil {
if err != grpc.ErrServerStopped {
return false, err
}
return false, nil
}
return empty, nil
return true, nil
}
func (c *client) CloseStdin(ctx context.Context, in *shimapi.CloseStdinRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.CloseStdin(ctx, in)
// KillShim kills the shim forcefully
func (c *Client) KillShim(ctx context.Context) error {
info, err := c.ShimInfo(ctx, empty)
if err != nil {
return err
}
pid := int(info.ShimPid)
// make sure we don't kill ourselves if we are running a local shim
if os.Getpid() == pid {
return nil
}
return unix.Kill(pid, unix.SIGKILL)
}
func (c *client) Checkpoint(ctx context.Context, in *shimapi.CheckpointRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Checkpoint(ctx, in)
}
type events struct {
c chan *task.Event
ctx context.Context
}
func (e *events) Recv() (*task.Event, error) {
ev := <-e.c
return ev, nil
}
func (e *events) Header() (metadata.MD, error) {
return nil, nil
}
func (e *events) Trailer() metadata.MD {
return nil
}
func (e *events) CloseSend() error {
return nil
}
func (e *events) Context() context.Context {
return e.ctx
}
func (e *events) SendMsg(m interface{}) error {
return nil
}
func (e *events) RecvMsg(m interface{}) error {
return nil
func (c *Client) Close() error {
if c.c == nil {
return nil
}
return c.c.Close()
}

View File

@@ -16,7 +16,7 @@ import (
"golang.org/x/sys/unix"
"github.com/containerd/console"
shimapi "github.com/containerd/containerd/api/services/shim/v1"
shimapi "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/fifo"
runc "github.com/containerd/go-runc"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -43,7 +43,7 @@ type execProcess struct {
terminal bool
}
func newExecProcess(context context.Context, path string, r *shimapi.ExecRequest, parent *initProcess, id int) (process, error) {
func newExecProcess(context context.Context, path string, r *shimapi.ExecProcessRequest, parent *initProcess, id int) (process, error) {
e := &execProcess{
id: id,
parent: parent,
@@ -120,17 +120,6 @@ func newExecProcess(context context.Context, path string, r *shimapi.ExecRequest
return e, nil
}
func rlimits(rr []*shimapi.Rlimit) (o []specs.LinuxRlimit) {
for _, r := range rr {
o = append(o, specs.LinuxRlimit{
Type: r.Type,
Hard: r.Hard,
Soft: r.Soft,
})
}
return o
}
func (e *execProcess) Pid() int {
return e.pid
}

View File

@@ -17,7 +17,7 @@ import (
"golang.org/x/sys/unix"
"github.com/containerd/console"
shimapi "github.com/containerd/containerd/api/services/shim/v1"
shimapi "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
@@ -52,7 +52,7 @@ type initProcess struct {
terminal bool
}
func newInitProcess(context context.Context, path, namespace string, r *shimapi.CreateRequest) (*initProcess, error) {
func newInitProcess(context context.Context, path, namespace string, r *shimapi.CreateTaskRequest) (*initProcess, error) {
for _, rm := range r.Rootfs {
m := &mount.Mount{
Type: rm.Type,
@@ -104,9 +104,10 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
WorkDir: filepath.Join(r.Bundle, "work"),
ParentPath: r.ParentCheckpoint,
},
PidFile: pidFile,
IO: io,
NoPivot: r.NoPivot,
PidFile: pidFile,
IO: io,
// TODO: implement runtime options
//NoPivot: r.NoPivot,
Detach: true,
NoSubreaper: true,
}
@@ -117,7 +118,7 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
opts := &runc.CreateOpts{
PidFile: pidFile,
IO: io,
NoPivot: r.NoPivot,
// NoPivot: r.NoPivot,
}
if socket != nil {
opts.ConsoleSocket = socket
@@ -253,7 +254,7 @@ func (p *initProcess) Stdin() io.Closer {
return p.stdin
}
func (p *initProcess) Checkpoint(context context.Context, r *shimapi.CheckpointRequest) error {
func (p *initProcess) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
var actions []runc.CheckpointAction
/*
if !r.Exit {

126
linux/shim/local.go Normal file
View File

@@ -0,0 +1,126 @@
// +build !windows
package shim
import (
"path/filepath"
shimapi "github.com/containerd/containerd/linux/shim/v1"
google_protobuf "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// NewLocal returns a shim client implementation for issuing commands to a
// shim running in the same process, without going over a gRPC connection.
func NewLocal(s *Service) shimapi.ShimClient {
return &local{
s: s,
}
}
// local adapts a *Service to the shimapi.ShimClient interface so in-process
// callers use the same API surface as remote (gRPC) shim clients. The
// grpc.CallOption variadics accepted by its methods are ignored, since no
// real gRPC call is made.
type local struct {
s *Service
}
// Create forwards the task-create request directly to the wrapped service.
func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest, opts ...grpc.CallOption) (*shimapi.CreateTaskResponse, error) {
return c.s.Create(ctx, in)
}
// Start forwards directly to the wrapped service.
func (c *local) Start(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Start(ctx, in)
}
// Delete unmounts the container's rootfs before delegating deletion to the
// wrapped service. Remote clients do not need this step; it only applies to
// the in-process case.
func (c *local) Delete(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
// make sure we unmount the containers rootfs for this local
// NOTE(review): if the rootfs is not currently mounted, Unmount returns an
// error and the service Delete is skipped entirely — confirm callers
// guarantee a mounted rootfs, or consider tolerating EINVAL here.
if err := unix.Unmount(filepath.Join(c.s.path, "rootfs"), 0); err != nil {
return nil, err
}
return c.s.Delete(ctx, in)
}
// DeleteProcess forwards directly to the wrapped service.
func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
return c.s.DeleteProcess(ctx, in)
}
// Exec forwards directly to the wrapped service.
func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest, opts ...grpc.CallOption) (*shimapi.ExecProcessResponse, error) {
return c.s.Exec(ctx, in)
}
// ResizePty forwards directly to the wrapped service.
func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.ResizePty(ctx, in)
}
// Stream returns a stream-client stub wired straight to the service's event
// channel instead of opening a gRPC stream.
func (c *local) Stream(ctx context.Context, in *shimapi.StreamEventsRequest, opts ...grpc.CallOption) (shimapi.Shim_StreamClient, error) {
return &events{
c: c.s.events,
ctx: ctx,
}, nil
}
// State forwards directly to the wrapped service.
func (c *local) State(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.StateResponse, error) {
return c.s.State(ctx, in)
}
// Pause forwards directly to the wrapped service.
func (c *local) Pause(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Pause(ctx, in)
}
// Resume forwards directly to the wrapped service.
func (c *local) Resume(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Resume(ctx, in)
}
// Kill forwards directly to the wrapped service.
func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Kill(ctx, in)
}
// ListProcesses forwards directly to the wrapped service.
func (c *local) ListProcesses(ctx context.Context, in *shimapi.ListProcessesRequest, opts ...grpc.CallOption) (*shimapi.ListProcessesResponse, error) {
return c.s.ListProcesses(ctx, in)
}
// CloseIO forwards directly to the wrapped service.
func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.CloseIO(ctx, in)
}
// Checkpoint forwards directly to the wrapped service.
func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
return c.s.Checkpoint(ctx, in)
}
// ShimInfo forwards directly to the wrapped service.
func (c *local) ShimInfo(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.ShimInfoResponse, error) {
return c.s.ShimInfo(ctx, in)
}
// events implements the client side of the event stream on top of the
// service's event channel; the gRPC-stream methods that have no local
// equivalent (Header, Trailer, SendMsg, ...) are stubbed out.
type events struct {
c chan *shimapi.Event
ctx context.Context
}
// Recv blocks until the next event is available on the channel.
// NOTE(review): e.ctx is never consulted here — a cancelled context will not
// unblock a pending Recv; confirm whether a select on e.ctx.Done() is wanted.
func (e *events) Recv() (*shimapi.Event, error) {
ev := <-e.c
return ev, nil
}
// Header returns no metadata; there is no underlying gRPC stream.
func (e *events) Header() (metadata.MD, error) {
return nil, nil
}
// Trailer returns no metadata; there is no underlying gRPC stream.
func (e *events) Trailer() metadata.MD {
return nil
}
// CloseSend is a no-op for the in-process stream.
func (e *events) CloseSend() error {
return nil
}
// Context returns the context the stream was created with.
func (e *events) Context() context.Context {
return e.ctx
}
// SendMsg is a no-op; the local stream is receive-only.
func (e *events) SendMsg(m interface{}) error {
return nil
}
// RecvMsg is a no-op stub; callers use Recv instead.
func (e *events) RecvMsg(m interface{}) error {
return nil
}

View File

@@ -9,8 +9,8 @@ import (
"syscall"
"github.com/containerd/console"
shimapi "github.com/containerd/containerd/api/services/shim/v1"
"github.com/containerd/containerd/api/types/task"
shimapi "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/reaper"
google_protobuf "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
@@ -22,15 +22,15 @@ var empty = &google_protobuf.Empty{}
const RuncRoot = "/run/containerd/runc"
// New returns a new shim service that can be used via GRPC
func New(path, namespace string) (*Service, error) {
// NewService returns a new shim service that can be used via GRPC
func NewService(path, namespace string) (*Service, error) {
if namespace == "" {
return nil, fmt.Errorf("shim namespace cannot be empty")
}
return &Service{
path: path,
processes: make(map[int]process),
events: make(chan *task.Event, 4096),
events: make(chan *shimapi.Event, 4096),
namespace: namespace,
}, nil
}
@@ -42,14 +42,14 @@ type Service struct {
bundle string
mu sync.Mutex
processes map[int]process
events chan *task.Event
events chan *shimapi.Event
eventsMu sync.Mutex
deferredEvent *task.Event
deferredEvent *shimapi.Event
execID int
namespace string
}
func (s *Service) Create(ctx context.Context, r *shimapi.CreateRequest) (*shimapi.CreateResponse, error) {
func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) {
process, err := newInitProcess(ctx, s.path, s.namespace, r)
if err != nil {
return nil, err
@@ -65,30 +65,30 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateRequest) (*shimap
ExitCh: make(chan int, 1),
}
reaper.Default.Register(pid, cmd)
s.events <- &task.Event{
Type: task.Event_CREATE,
s.events <- &shimapi.Event{
Type: shimapi.Event_CREATE,
ID: r.ID,
Pid: uint32(pid),
}
go s.waitExit(process, pid, cmd)
return &shimapi.CreateResponse{
return &shimapi.CreateTaskResponse{
Pid: uint32(pid),
}, nil
}
func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*google_protobuf.Empty, error) {
func (s *Service) Start(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
if err := s.initProcess.Start(ctx); err != nil {
return nil, err
}
s.events <- &task.Event{
Type: task.Event_START,
s.events <- &shimapi.Event{
Type: shimapi.Event_START,
ID: s.id,
Pid: uint32(s.initProcess.Pid()),
}
return empty, nil
}
func (s *Service) Delete(ctx context.Context, r *shimapi.DeleteRequest) (*shimapi.DeleteResponse, error) {
func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimapi.DeleteResponse, error) {
p := s.initProcess
// TODO (@crosbymichael): how to handle errors here
p.Delete(ctx)
@@ -124,7 +124,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq
}, nil
}
func (s *Service) Exec(ctx context.Context, r *shimapi.ExecRequest) (*shimapi.ExecResponse, error) {
func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*shimapi.ExecProcessResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
s.execID++
@@ -140,18 +140,18 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecRequest) (*shimapi.Ex
reaper.Default.Register(pid, cmd)
s.processes[pid] = process
s.events <- &task.Event{
Type: task.Event_EXEC_ADDED,
s.events <- &shimapi.Event{
Type: shimapi.Event_EXEC_ADDED,
ID: s.id,
Pid: uint32(pid),
}
go s.waitExit(process, pid, cmd)
return &shimapi.ExecResponse{
return &shimapi.ExecProcessResponse{
Pid: uint32(pid),
}, nil
}
func (s *Service) Pty(ctx context.Context, r *shimapi.PtyRequest) (*google_protobuf.Empty, error) {
func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*google_protobuf.Empty, error) {
if r.Pid == 0 {
return nil, errors.Errorf("pid not provided in request")
}
@@ -171,7 +171,7 @@ func (s *Service) Pty(ctx context.Context, r *shimapi.PtyRequest) (*google_proto
return empty, nil
}
func (s *Service) Events(r *shimapi.EventsRequest, stream shimapi.Shim_EventsServer) error {
func (s *Service) Stream(r *shimapi.StreamEventsRequest, stream shimapi.Shim_StreamServer) error {
s.eventsMu.Lock()
defer s.eventsMu.Unlock()
@@ -195,7 +195,7 @@ func (s *Service) Events(r *shimapi.EventsRequest, stream shimapi.Shim_EventsSer
}
}
func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) {
func (s *Service) State(ctx context.Context, r *google_protobuf.Empty) (*shimapi.StateResponse, error) {
st, err := s.initProcess.ContainerStatus(ctx)
if err != nil {
return nil, err
@@ -245,28 +245,20 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.
return o, nil
}
func (s *Service) Pause(ctx context.Context, r *shimapi.PauseRequest) (*google_protobuf.Empty, error) {
func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
if err := s.initProcess.Pause(ctx); err != nil {
return nil, err
}
return empty, nil
}
func (s *Service) Resume(ctx context.Context, r *shimapi.ResumeRequest) (*google_protobuf.Empty, error) {
func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
if err := s.initProcess.Resume(ctx); err != nil {
return nil, err
}
return empty, nil
}
func (s *Service) Exit(ctx context.Context, r *shimapi.ExitRequest) (*google_protobuf.Empty, error) {
// signal ourself to exit
if err := unix.Kill(os.Getpid(), syscall.SIGTERM); err != nil {
return nil, err
}
return empty, nil
}
func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_protobuf.Empty, error) {
if r.Pid == 0 {
if err := s.initProcess.Kill(ctx, r.Signal, r.All); err != nil {
@@ -300,7 +292,7 @@ func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_pro
return empty, nil
}
func (s *Service) Processes(ctx context.Context, r *shimapi.ProcessesRequest) (*shimapi.ProcessesResponse, error) {
func (s *Service) ListProcesses(ctx context.Context, r *shimapi.ListProcessesRequest) (*shimapi.ListProcessesResponse, error) {
pids, err := s.getContainerPids(ctx, r.ID)
if err != nil {
return nil, err
@@ -311,13 +303,13 @@ func (s *Service) Processes(ctx context.Context, r *shimapi.ProcessesRequest) (*
Pid: pid,
})
}
resp := &shimapi.ProcessesResponse{
resp := &shimapi.ListProcessesResponse{
Processes: ps,
}
return resp, nil
}
func (s *Service) CloseStdin(ctx context.Context, r *shimapi.CloseStdinRequest) (*google_protobuf.Empty, error) {
func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*google_protobuf.Empty, error) {
p, ok := s.processes[int(r.Pid)]
if !ok {
return nil, fmt.Errorf("process does not exist %d", r.Pid)
@@ -328,20 +320,26 @@ func (s *Service) CloseStdin(ctx context.Context, r *shimapi.CloseStdinRequest)
return empty, nil
}
func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointRequest) (*google_protobuf.Empty, error) {
func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*google_protobuf.Empty, error) {
if err := s.initProcess.Checkpoint(ctx, r); err != nil {
return nil, err
}
return empty, nil
}
func (s *Service) ShimInfo(ctx context.Context, r *google_protobuf.Empty) (*shimapi.ShimInfoResponse, error) {
return &shimapi.ShimInfoResponse{
ShimPid: uint32(os.Getpid()),
}, nil
}
func (s *Service) waitExit(p process, pid int, cmd *reaper.Cmd) {
status := <-cmd.ExitCh
p.Exited(status)
reaper.Default.Delete(pid)
s.events <- &task.Event{
Type: task.Event_EXIT,
s.events <- &shimapi.Event{
Type: shimapi.Event_EXIT,
ID: s.id,
Pid: uint32(pid),
ExitStatus: uint32(status),

4247
linux/shim/v1/shim.pb.go Normal file

File diff suppressed because it is too large Load Diff

156
linux/shim/v1/shim.proto Normal file
View File

@@ -0,0 +1,156 @@
syntax = "proto3";
package containerd.runtime.linux.shim.v1;
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
option go_package = "github.com/containerd/containerd/linux/shim/v1;shim";
// Shim service is launched for each container and is responsible for owning the IO
// for the container and its additional processes. The shim is also the parent of
// each container and allows reattaching to the IO and receiving the exit status
// for the container processes.
service Shim {
// State returns shim and task state information.
rpc State(google.protobuf.Empty) returns (StateResponse);
// Create creates the task for the bundle described in the request.
rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
// Start starts the previously created task.
rpc Start(google.protobuf.Empty) returns (google.protobuf.Empty);
// Delete deletes the task and returns its exit information.
rpc Delete(google.protobuf.Empty) returns (DeleteResponse);
// DeleteProcess deletes a single additional process by pid.
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
// ListProcesses returns the processes belonging to the task.
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
// Pause pauses the container.
rpc Pause(google.protobuf.Empty) returns (google.protobuf.Empty);
// Resume resumes a paused container.
rpc Resume(google.protobuf.Empty) returns (google.protobuf.Empty);
// Checkpoint checkpoints the task to the path in the request.
rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty);
// Stream returns a stream of task lifecycle events.
rpc Stream(StreamEventsRequest) returns (stream Event);
// Kill delivers a signal to the task or to a specific pid.
rpc Kill(KillRequest) returns (google.protobuf.Empty);
// Exec runs an additional process inside the container.
rpc Exec(ExecProcessRequest) returns (ExecProcessResponse);
// ResizePty resizes the pty of a process by pid.
rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
// CloseIO closes IO (stdin) of a process by pid.
rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
// ShimInfo returns information about the shim.
rpc ShimInfo(google.protobuf.Empty) returns (ShimInfoResponse);
}
// CreateTaskRequest describes the container bundle, rootfs mounts, and IO
// paths used to create a task.
message CreateTaskRequest {
string id = 1;
string bundle = 2;
string runtime = 3;
repeated containerd.types.Mount rootfs = 4;
bool terminal = 5;
string stdin = 6;
string stdout = 7;
string stderr = 8;
string checkpoint = 9;
string parent_checkpoint = 10;
}
// CreateTaskResponse returns the pid of the created task's init process.
message CreateTaskResponse {
uint32 pid = 1;
}
// DeleteResponse carries the exit information of a deleted process.
message DeleteResponse {
uint32 pid = 1;
uint32 exit_status = 2;
google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
// DeleteProcessRequest selects an additional process to delete by pid.
message DeleteProcessRequest {
uint32 pid = 1;
}
// ExecProcessRequest describes an additional process to run in the container;
// spec carries the process specification as a serialized Any.
message ExecProcessRequest {
bool terminal = 1;
string stdin = 2;
string stdout = 3;
string stderr = 4;
google.protobuf.Any spec = 5;
}
// ExecProcessResponse returns the pid of the exec'd process.
message ExecProcessResponse {
uint32 pid = 1;
}
// ResizePtyRequest sets the console size for a process by pid.
message ResizePtyRequest {
uint32 pid = 1;
uint32 width = 2;
uint32 height = 3;
}
// StateResponse reports task status, its processes, and its IO configuration.
message StateResponse {
string id = 1;
string bundle = 2;
uint32 pid = 3;
containerd.v1.types.Status status = 4;
repeated containerd.v1.types.Process processes = 5;
string stdin = 6;
string stdout = 7;
string stderr = 8;
bool terminal = 9;
}
// KillRequest delivers a signal; when pid is 0 with all set, the signal
// presumably targets the whole task — verify against the service handler.
message KillRequest {
uint32 signal = 1;
bool all = 2;
uint32 pid = 3;
}
// CloseIORequest closes a process's stdin when stdin is true.
message CloseIORequest {
uint32 pid = 1;
bool stdin = 2;
}
// ListProcessesRequest selects the task whose processes to list.
message ListProcessesRequest {
string id = 1;
}
// ListProcessesResponse returns the task's processes.
message ListProcessesResponse{
repeated containerd.v1.types.Process processes = 1;
}
// CheckpointTaskRequest gives the checkpoint destination path and
// runtime-specific options.
message CheckpointTaskRequest {
string path = 1;
map<string, string> options = 2;
}
// ShimInfoResponse reports the pid of the shim process itself.
message ShimInfoResponse {
uint32 shim_pid = 1;
}
// StreamEventsRequest subscribes to the event stream; it carries no fields.
message StreamEventsRequest {
}
// Event describes a task lifecycle transition for a process of the task.
message Event {
string id = 1;
enum EventType {
EXIT = 0;
OOM = 1;
CREATE = 2;
START = 3;
EXEC_ADDED = 4;
PAUSED = 5;
}
EventType type = 2;
uint32 pid = 3;
uint32 exit_status = 4;
google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

View File

@@ -7,8 +7,9 @@ import (
"google.golang.org/grpc"
"github.com/containerd/containerd/api/services/shim/v1"
"github.com/containerd/containerd/api/types/task"
client "github.com/containerd/containerd/linux/shim"
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/plugin"
protobuf "github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -18,11 +19,11 @@ import (
type Task struct {
containerID string
spec []byte
shim shim.ShimClient
shim *client.Client
namespace string
}
func newTask(id, namespace string, spec []byte, shim shim.ShimClient) *Task {
func newTask(id, namespace string, spec []byte, shim *client.Client) *Task {
return &Task{
containerID: id,
shim: shim,
@@ -42,7 +43,7 @@ func (t *Task) Info() plugin.TaskInfo {
}
func (t *Task) Start(ctx context.Context) error {
_, err := t.shim.Start(ctx, &shim.StartRequest{})
_, err := t.shim.Start(ctx, empty)
if err != nil {
err = errors.New(grpc.ErrorDesc(err))
}
@@ -50,7 +51,7 @@ func (t *Task) Start(ctx context.Context) error {
}
func (t *Task) State(ctx context.Context) (plugin.State, error) {
response, err := t.shim.State(ctx, &shim.StateRequest{})
response, err := t.shim.State(ctx, empty)
if err != nil {
return plugin.State{}, errors.New(grpc.ErrorDesc(err))
}
@@ -77,7 +78,7 @@ func (t *Task) State(ctx context.Context) (plugin.State, error) {
}
func (t *Task) Pause(ctx context.Context) error {
_, err := t.shim.Pause(ctx, &shim.PauseRequest{})
_, err := t.shim.Pause(ctx, empty)
if err != nil {
err = errors.New(grpc.ErrorDesc(err))
}
@@ -85,7 +86,7 @@ func (t *Task) Pause(ctx context.Context) error {
}
func (t *Task) Resume(ctx context.Context) error {
_, err := t.shim.Resume(ctx, &shim.ResumeRequest{})
_, err := t.shim.Resume(ctx, empty)
if err != nil {
err = errors.New(grpc.ErrorDesc(err))
}
@@ -105,7 +106,7 @@ func (t *Task) Kill(ctx context.Context, signal uint32, pid uint32, all bool) er
}
func (t *Task) Exec(ctx context.Context, opts plugin.ExecOpts) (plugin.Process, error) {
request := &shim.ExecRequest{
request := &shim.ExecProcessRequest{
Stdin: opts.IO.Stdin,
Stdout: opts.IO.Stdout,
Stderr: opts.IO.Stderr,
@@ -127,7 +128,7 @@ func (t *Task) Exec(ctx context.Context, opts plugin.ExecOpts) (plugin.Process,
}
func (t *Task) Processes(ctx context.Context) ([]uint32, error) {
resp, err := t.shim.Processes(ctx, &shim.ProcessesRequest{
resp, err := t.shim.ListProcesses(ctx, &shim.ListProcessesRequest{
ID: t.containerID,
})
if err != nil {
@@ -143,8 +144,8 @@ func (t *Task) Processes(ctx context.Context) ([]uint32, error) {
return pids, nil
}
func (t *Task) Pty(ctx context.Context, pid uint32, size plugin.ConsoleSize) error {
_, err := t.shim.Pty(ctx, &shim.PtyRequest{
func (t *Task) ResizePty(ctx context.Context, pid uint32, size plugin.ConsoleSize) error {
_, err := t.shim.ResizePty(ctx, &shim.ResizePtyRequest{
Pid: pid,
Width: size.Width,
Height: size.Height,
@@ -155,9 +156,10 @@ func (t *Task) Pty(ctx context.Context, pid uint32, size plugin.ConsoleSize) err
return err
}
func (t *Task) CloseStdin(ctx context.Context, pid uint32) error {
_, err := t.shim.CloseStdin(ctx, &shim.CloseStdinRequest{
Pid: pid,
func (t *Task) CloseIO(ctx context.Context, pid uint32) error {
_, err := t.shim.CloseIO(ctx, &shim.CloseIORequest{
Pid: pid,
Stdin: true,
})
if err != nil {
err = errors.New(grpc.ErrorDesc(err))
@@ -166,7 +168,7 @@ func (t *Task) CloseStdin(ctx context.Context, pid uint32) error {
}
func (t *Task) Checkpoint(ctx context.Context, path string, options map[string]string) error {
r := &shim.CheckpointRequest{
r := &shim.CheckpointTaskRequest{
Path: path,
Options: options,
}