Comment more packages to pass go lint

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Michael Crosby 2017-10-02 11:50:18 -04:00
parent 33e974ce99
commit 451421b615
54 changed files with 255 additions and 121 deletions

View File

@ -300,7 +300,7 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
m.Lock()
manifestStack = append(manifestStack, desc)
m.Unlock()
return nil, images.StopHandler
return nil, images.ErrStopHandler
default:
return nil, nil
}

View File

@ -36,7 +36,7 @@ type store struct {
root string
}
// NewServer returns a local content store
// NewStore returns a local content store
func NewStore(root string) (content.Store, error) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
return nil, err
@ -383,8 +383,8 @@ func (s *store) Abort(ctx context.Context, ref string) error {
return nil
}
func (cs *store) blobPath(dgst digest.Digest) string {
return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
func (s *store) blobPath(dgst digest.Digest) string {
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
}
func (s *store) ingestRoot(ref string) string {

View File

@ -137,7 +137,7 @@ We get back a list of mounts from `Snapshotter.Prepare`, with the `key`
identifying the active snapshot. Mount this to the temporary location with the
following:
if err := MountAll(mounts, tmpDir); err != nil { ... }
if err := mount.All(mounts, tmpDir); err != nil { ... }
Once the mounts are performed, our temporary location is ready to capture
a diff. In practice, this works similar to a filesystem transaction. The
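A minimal sketch (not part of this commit) of the documented flow, assuming the two-argument `Snapshotter.Prepare` of this era and the renamed `mount.All`/`mount.Unmount` helpers; the key and temporary directory are illustrative:

package main

import (
	"context"
	"io/ioutil"
	"os"

	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/snapshot"
)

// prepareAndMount prepares an active snapshot and mounts it at a temporary
// location so a diff can be captured, mirroring the documentation above.
// The returned cleanup unmounts and removes the temporary directory.
func prepareAndMount(ctx context.Context, sn snapshot.Snapshotter, key, parent string) (string, func(), error) {
	mounts, err := sn.Prepare(ctx, key, parent)
	if err != nil {
		return "", nil, err
	}
	tmpDir, err := ioutil.TempDir("", "snapshot-")
	if err != nil {
		return "", nil, err
	}
	if err := mount.All(mounts, tmpDir); err != nil {
		os.RemoveAll(tmpDir)
		return "", nil, err
	}
	cleanup := func() {
		mount.Unmount(tmpDir, 0)
		os.RemoveAll(tmpDir)
	}
	return tmpDir, cleanup, nil
}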

View File

@ -25,7 +25,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.DiffPlugin,
ID: "walking",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.ContentPlugin,
plugin.MetadataPlugin,
},
@ -81,7 +81,7 @@ func (s *walkingDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts
}
defer os.RemoveAll(dir)
if err := mount.MountAll(mounts, dir); err != nil {
if err := mount.All(mounts, dir); err != nil {
return emptyDesc, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(dir, 0)
@ -149,12 +149,12 @@ func (s *walkingDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount
}
defer os.RemoveAll(bDir)
if err := mount.MountAll(lower, aDir); err != nil {
if err := mount.All(lower, aDir); err != nil {
return emptyDesc, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(aDir, 0)
if err := mount.MountAll(upper, bDir); err != nil {
if err := mount.All(upper, bDir); err != nil {
return emptyDesc, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(bDir, 0)

View File

@ -14,37 +14,40 @@ import (
)
var (
// SkipDesc is used to skip processing of a descriptor and
// ErrSkipDesc is used to skip processing of a descriptor and
// its descendants.
SkipDesc = fmt.Errorf("skip descriptor")
ErrSkipDesc = fmt.Errorf("skip descriptor")
// StopHandler is used to signify that the descriptor
// ErrStopHandler is used to signify that the descriptor
// has been handled and should not be handled further.
// This applies only to a single descriptor in a handler
// chain and does not apply to descendant descriptors.
StopHandler = fmt.Errorf("stop handler")
ErrStopHandler = fmt.Errorf("stop handler")
)
// Handler handles image manifests
type Handler interface {
Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
}
// HandlerFunc function implementing the Handler interface
type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
// Handle image manifests
func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
return fn(ctx, desc)
}
// Handlers returns a handler that will run the handlers in sequence.
//
// A handler may return `StopHandler` to stop calling additional handlers
// A handler may return `ErrStopHandler` to stop calling additional handlers
func Handlers(handlers ...Handler) HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
var children []ocispec.Descriptor
for _, handler := range handlers {
ch, err := handler.Handle(ctx, desc)
if err != nil {
if errors.Cause(err) == StopHandler {
if errors.Cause(err) == ErrStopHandler {
break
}
return nil, err
@ -67,7 +70,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
children, err := handler.Handle(ctx, desc)
if err != nil {
if errors.Cause(err) == SkipDesc {
if errors.Cause(err) == ErrSkipDesc {
continue // don't traverse the children.
}
return err
@ -87,7 +90,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
// If the handler decodes subresources, they will be visited as well.
//
// Handlers for siblings are run in parallel on the provided descriptors. A
// handler may return `SkipDesc` to signal to the dispatcher to not traverse
// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
// any children.
//
// Typically, this function will be used with `FetchHandler`, often composed
@ -104,7 +107,7 @@ func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor)
children, err := handler.Handle(ctx, desc)
if err != nil {
if errors.Cause(err) == SkipDesc {
if errors.Cause(err) == ErrSkipDesc {
return nil // don't traverse the children.
}
return err
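A small sketch (not part of this commit) showing how the renamed sentinels are consumed by a handler chain; the handler bodies and the config-blob check are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// chain short-circuits the remaining handlers for config blobs and logs
// everything else.
func chain() images.HandlerFunc {
	stopOnConfig := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.MediaType == ocispec.MediaTypeImageConfig {
			// Stops only this descriptor's remaining handlers, not traversal.
			return nil, images.ErrStopHandler
		}
		return nil, nil
	})
	logger := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		fmt.Println("visited", desc.MediaType, desc.Digest)
		return nil, nil
	})
	return images.Handlers(stopOnConfig, logger)
}

Passing chain() to images.Walk or images.Dispatch then visits descendant descriptors unless a handler returns images.ErrSkipDesc.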

View File

@ -21,6 +21,7 @@ type Image struct {
CreatedAt, UpdatedAt time.Time
}
// Store and interact with images
type Store interface {
Get(ctx context.Context, name string) (Image, error)
List(ctx context.Context, filters ...string) ([]Image, error)
@ -69,6 +70,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
}), ChildrenHandler(provider, platform)), image.Target)
}
// Manifest returns the manifest for an image.
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Manifest, error) {
var (
matcher platforms.Matcher
@ -177,7 +179,7 @@ func Platforms(ctx context.Context, provider content.Provider, image ocispec.Des
return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.Platform != nil {
platformSpecs = append(platformSpecs, *desc.Platform)
return nil, SkipDesc
return nil, ErrSkipDesc
}
switch desc.MediaType {

View File

@ -9,15 +9,13 @@ const (
maxSize = 4096
)
// Validate a label's key and value are under 4096 bytes
func Validate(k, v string) error {
// A label key and value should be under 4096 bytes
if (len(k) + len(v)) > maxSize {
if len(k) > 10 {
k = k[:10]
}
return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
}
return nil
}
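For context (not part of this commit), a hypothetical call showing the behaviour, assuming this is the containerd `labels` package and that `errdefs.IsInvalidArgument` is available:

package main

import (
	"strings"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/labels"
)

func validateExample() bool {
	// A key/value pair whose combined length exceeds 4096 bytes is rejected;
	// the error wraps errdefs.ErrInvalidArgument and the key shown in the
	// message is truncated to 10 bytes.
	err := labels.Validate("com.example/key", strings.Repeat("x", 5000))
	return errdefs.IsInvalidArgument(err) // true for the oversized pair
}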

View File

@ -15,6 +15,7 @@ import (
"github.com/pkg/errors"
)
// loadBundle loads an existing bundle from disk
func loadBundle(id, path, workdir string) *bundle {
return &bundle{
id: id,
@ -71,6 +72,7 @@ type bundle struct {
type shimOpt func(*bundle, string, *runcopts.RuncOptions) (client.Config, client.ClientOpt)
// ShimRemote is a shimOpt for connecting and starting a remote shim
func ShimRemote(shim, daemonAddress, cgroup string, debug bool, exitHandler func()) shimOpt {
return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
return b.shimConfig(ns, ropts),
@ -78,12 +80,14 @@ func ShimRemote(shim, daemonAddress, cgroup string, debug bool, exitHandler func
}
}
// ShimLocal is a shimOpt for using an in process shim implementation
func ShimLocal(exchange *events.Exchange) shimOpt {
return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
return b.shimConfig(ns, ropts), client.WithLocal(exchange)
}
}
// ShimConnect is a shimOpt for connecting to an existing remote shim
func ShimConnect() shimOpt {
return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns))

View File

@ -11,15 +11,20 @@ import (
"github.com/containerd/containerd/runtime"
)
// Process implements a linux process
type Process struct {
id string
t *Task
}
// ID of the process
func (p *Process) ID() string {
return p.id
}
// Kill sends the provided signal to the underlying process
//
// It is not possible to kill all processes in the task using this method on a single process
func (p *Process) Kill(ctx context.Context, signal uint32, _ bool) error {
_, err := p.t.shim.Kill(ctx, &shim.KillRequest{
Signal: signal,
@ -31,6 +36,7 @@ func (p *Process) Kill(ctx context.Context, signal uint32, _ bool) error {
return err
}
// State of process
func (p *Process) State(ctx context.Context) (runtime.State, error) {
// use the container status for the status of the process
response, err := p.t.shim.State(ctx, &shim.StateRequest{
@ -63,6 +69,7 @@ func (p *Process) State(ctx context.Context) (runtime.State, error) {
}, nil
}
// ResizePty changes the size of the process's PTY to the provided width and height
func (p *Process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
_, err := p.t.shim.ResizePty(ctx, &shim.ResizePtyRequest{
ID: p.id,
@ -75,6 +82,7 @@ func (p *Process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error
return err
}
// CloseIO closes the provided IO pipe for the process
func (p *Process) CloseIO(ctx context.Context) error {
_, err := p.t.shim.CloseIO(ctx, &shim.CloseIORequest{
ID: p.id,
@ -86,6 +94,7 @@ func (p *Process) CloseIO(ctx context.Context) error {
return nil
}
// Start the process
func (p *Process) Start(ctx context.Context) error {
_, err := p.t.shim.Start(ctx, &shim.StartRequest{
ID: p.id,
@ -96,6 +105,7 @@ func (p *Process) Start(ctx context.Context) error {
return nil
}
// Wait on the process to exit and return the exit status and timestamp
func (p *Process) Wait(ctx context.Context) (*runtime.Exit, error) {
r, err := p.t.shim.Wait(ctx, &shim.WaitRequest{
ID: p.id,

View File

@ -52,7 +52,7 @@ func init() {
Type: plugin.RuntimePlugin,
ID: "linux",
Init: New,
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.TaskMonitorPlugin,
plugin.MetadataPlugin,
},
@ -65,6 +65,7 @@ func init() {
var _ = (runtime.Runtime)(&Runtime{})
// Config options for the runtime
type Config struct {
// Shim is a path or name of binary implementing the Shim GRPC API
Shim string `toml:"shim"`
@ -78,6 +79,7 @@ type Config struct {
ShimDebug bool `toml:"shim_debug"`
}
// New returns a configured runtime
func New(ic *plugin.InitContext) (interface{}, error) {
if err := os.MkdirAll(ic.Root, 0711); err != nil {
return nil, err
@ -117,6 +119,7 @@ func New(ic *plugin.InitContext) (interface{}, error) {
return r, nil
}
// Runtime for a linux based system
type Runtime struct {
root string
state string
@ -130,10 +133,12 @@ type Runtime struct {
config *Config
}
// ID of the runtime
func (r *Runtime) ID() string {
return pluginID
}
// Create a new task
func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, err error) {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
@ -265,6 +270,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts
return t, nil
}
// Delete a task removing all on disk state
func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
@ -305,6 +311,7 @@ func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, er
}, nil
}
// Tasks returns all tasks known to the runtime
func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) {
return r.tasks.GetAll(ctx)
}
@ -330,6 +337,7 @@ func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) {
return o, nil
}
// Get a specific task by task id
func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) {
return r.tasks.Get(ctx, id)
}
@ -491,6 +499,5 @@ func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runcopts.Runc
return ropts, nil
}
return nil, nil
}

View File

@ -27,6 +27,7 @@ import (
"google.golang.org/grpc"
)
// ClientOpt is an option for a shim client configuration
type ClientOpt func(context.Context, Config) (shim.ShimClient, io.Closer, error)
// WithStart executes a new shim process
@ -180,6 +181,7 @@ func WithLocal(publisher events.Publisher) func(context.Context, Config) (shim.S
}
}
// Config contains shim specific configuration
type Config struct {
Path string
Namespace string
@ -202,6 +204,7 @@ func New(ctx context.Context, config Config, opt ClientOpt) (*Client, error) {
}, nil
}
// Client is a shim client containing the connection to a shim
type Client struct {
shim.ShimClient
@ -233,6 +236,7 @@ func (c *Client) KillShim(ctx context.Context) error {
return c.signalShim(ctx, unix.SIGKILL)
}
// Close the client connection
func (c *Client) Close() error {
if c.c == nil {
return nil

View File

@ -27,6 +27,7 @@ import (
"github.com/pkg/errors"
)
// InitPidFile is the name of the file that contains the init pid
const InitPidFile = "init.pid"
type initProcess struct {

View File

@ -29,6 +29,7 @@ import (
var empty = &google_protobuf.Empty{}
// RuncRoot is the path to the root runc state directory
const RuncRoot = "/run/containerd/runc"
// NewService returns a new shim service that can be used via GRPC
@ -65,6 +66,7 @@ type platform interface {
close() error
}
// Service is the shim implementation of a remote shim over GRPC
type Service struct {
mu sync.Mutex
@ -80,6 +82,7 @@ type Service struct {
bundle string
}
// Create a new initial process and container with the underlying OCI runtime
func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -110,6 +113,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*sh
}, nil
}
// Start a process
func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -139,6 +143,7 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.
}, nil
}
// Delete the initial process and container
func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimapi.DeleteResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -165,6 +170,7 @@ func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimap
}, nil
}
// DeleteProcess deletes an exec'd process
func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -186,6 +192,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq
}, nil
}
// Exec an additional process inside the container
func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -212,6 +219,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*goo
return empty, nil
}
// ResizePty of a process
func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -232,6 +240,7 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*
return empty, nil
}
// State returns runtime state information for a process
func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -271,6 +280,7 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.
}, nil
}
// Pause the container
func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -287,6 +297,7 @@ func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_
return empty, nil
}
// Resume the container
func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -303,6 +314,7 @@ func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google
return empty, nil
}
// Kill a process with the provided signal
func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -327,6 +339,7 @@ func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_pro
return empty, nil
}
// ListPids returns all pids inside the container
func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) {
pids, err := s.getContainerPids(ctx, r.ID)
if err != nil {
@ -337,6 +350,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh
}, nil
}
// CloseIO of a process
func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -352,6 +366,7 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*goog
return empty, nil
}
// Checkpoint the container
func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -368,12 +383,14 @@ func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskReque
return empty, nil
}
// ShimInfo returns shim information such as the shim's pid
func (s *Service) ShimInfo(ctx context.Context, r *google_protobuf.Empty) (*shimapi.ShimInfoResponse, error) {
return &shimapi.ShimInfoResponse{
ShimPid: uint32(os.Getpid()),
}, nil
}
// Update a running container
func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*google_protobuf.Empty, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -387,6 +404,7 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*go
return empty, nil
}
// Wait for a process to exit
func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) {
s.mu.Lock()
p := s.processes[r.ID]

View File

@ -16,6 +16,7 @@ import (
"github.com/gogo/protobuf/types"
)
// Task on a linux based system
type Task struct {
id string
pid int
@ -45,10 +46,12 @@ func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime
}, nil
}
// ID of the task
func (t *Task) ID() string {
return t.id
}
// Info returns task information about the runtime and namespace
func (t *Task) Info() runtime.TaskInfo {
return runtime.TaskInfo{
ID: t.id,
@ -57,6 +60,7 @@ func (t *Task) Info() runtime.TaskInfo {
}
}
// Start the task
func (t *Task) Start(ctx context.Context) error {
hasCgroup := t.cg != nil
r, err := t.shim.Start(ctx, &shim.StartRequest{
@ -79,6 +83,7 @@ func (t *Task) Start(ctx context.Context) error {
return nil
}
// State returns runtime information for the task
func (t *Task) State(ctx context.Context) (runtime.State, error) {
response, err := t.shim.State(ctx, &shim.StateRequest{
ID: t.id,
@ -114,6 +119,7 @@ func (t *Task) State(ctx context.Context) (runtime.State, error) {
}, nil
}
// Pause the task and all processes
func (t *Task) Pause(ctx context.Context) error {
_, err := t.shim.Pause(ctx, empty)
if err != nil {
@ -122,6 +128,7 @@ func (t *Task) Pause(ctx context.Context) error {
return err
}
// Resume the task and all processes
func (t *Task) Resume(ctx context.Context) error {
if _, err := t.shim.Resume(ctx, empty); err != nil {
return errdefs.FromGRPC(err)
@ -129,6 +136,9 @@ func (t *Task) Resume(ctx context.Context) error {
return nil
}
// Kill the task using the provided signal
//
// Optionally send the signal to all processes that are children of the task
func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error {
if _, err := t.shim.Kill(ctx, &shim.KillRequest{
ID: t.id,
@ -140,6 +150,7 @@ func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error {
return nil
}
// Exec creates a new process inside the task
func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
request := &shim.ExecProcessRequest{
ID: id,
@ -158,6 +169,7 @@ func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runt
}, nil
}
// Pids returns all system level process ids running inside the task
func (t *Task) Pids(ctx context.Context) ([]uint32, error) {
resp, err := t.shim.ListPids(ctx, &shim.ListPidsRequest{
ID: t.id,
@ -168,6 +180,7 @@ func (t *Task) Pids(ctx context.Context) ([]uint32, error) {
return resp.Pids, nil
}
// ResizePty changes the size of the task's PTY to the provided width and height
func (t *Task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
_, err := t.shim.ResizePty(ctx, &shim.ResizePtyRequest{
ID: t.id,
@ -180,6 +193,7 @@ func (t *Task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
return err
}
// CloseIO closes the provided IO on the task
func (t *Task) CloseIO(ctx context.Context) error {
_, err := t.shim.CloseIO(ctx, &shim.CloseIORequest{
ID: t.id,
@ -191,6 +205,7 @@ func (t *Task) CloseIO(ctx context.Context) error {
return err
}
// Checkpoint creates a system level dump of the task and process information that can be later restored
func (t *Task) Checkpoint(ctx context.Context, path string, options *types.Any) error {
r := &shim.CheckpointTaskRequest{
Path: path,
@ -202,6 +217,7 @@ func (t *Task) Checkpoint(ctx context.Context, path string, options *types.Any)
return nil
}
// DeleteProcess removes the provided process from the task and deletes all on disk state
func (t *Task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
r, err := t.shim.DeleteProcess(ctx, &shim.DeleteProcessRequest{
ID: id,
@ -216,6 +232,7 @@ func (t *Task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, err
}, nil
}
// Update changes runtime information of a running task
func (t *Task) Update(ctx context.Context, resources *types.Any) error {
if _, err := t.shim.Update(ctx, &shim.UpdateTaskRequest{
Resources: resources,
@ -225,6 +242,7 @@ func (t *Task) Update(ctx context.Context, resources *types.Any) error {
return nil
}
// Process returns a specific process inside the task by the process id
func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error) {
// TODO: verify process exists for container
return &Process{
@ -233,6 +251,7 @@ func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error)
}, nil
}
// Metrics returns runtime specific system level metric information for the task
func (t *Task) Metrics(ctx context.Context) (interface{}, error) {
stats, err := t.cg.Stat(cgroups.IgnoreNotExist)
if err != nil {
@ -241,10 +260,12 @@ func (t *Task) Metrics(ctx context.Context) (interface{}, error) {
return stats, nil
}
// Cgroup returns the underlying cgroup for a linux task
func (t *Task) Cgroup() cgroups.Cgroup {
return t.cg
}
// Wait for the task to exit returning the status and timestamp
func (t *Task) Wait(ctx context.Context) (*runtime.Exit, error) {
r, err := t.shim.Wait(ctx, &shim.WaitRequest{
ID: t.id,

View File

@ -22,6 +22,7 @@ type containerStore struct {
tx *bolt.Tx
}
// NewContainerStore returns a Store backed by an underlying bolt DB
func NewContainerStore(tx *bolt.Tx) containers.Store {
return &containerStore{
tx: tx,

View File

@ -417,11 +417,7 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64,
if err := boltutil.WriteLabels(bkt, base.Labels); err != nil {
return err
}
if err := bkt.Put(bucketKeySize, sizeEncoded); err != nil {
return err
}
return nil
return bkt.Put(bucketKeySize, sizeEncoded)
}
func (nw *namespacedWriter) Status() (content.Status, error) {
@ -497,9 +493,5 @@ func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
return err
}
if err := bkt.Put(bucketKeySize, sizeEncoded); err != nil {
return err
}
return nil
return bkt.Put(bucketKeySize, sizeEncoded)
}

View File

@ -22,6 +22,7 @@ type imageStore struct {
tx *bolt.Tx
}
// NewImageStore returns a store backed by a bolt DB
func NewImageStore(tx *bolt.Tx) images.Store {
return &imageStore{tx: tx}
}
@ -281,7 +282,7 @@ func writeImage(bkt *bolt.Bucket, image *images.Image) error {
func encodeSize(size int64) ([]byte, error) {
var (
buf [binary.MaxVarintLen64]byte
sizeEncoded []byte = buf[:]
sizeEncoded = buf[:]
)
sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)]
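A standard-library sketch (not part of this commit) of the varint encoding above, showing that the explicit `[]byte` annotation dropped here was redundant:

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeSize(size int64) []byte {
	var (
		buf         [binary.MaxVarintLen64]byte
		sizeEncoded = buf[:] // type inferred from buf[:]; the []byte annotation is not needed
	)
	return sizeEncoded[:binary.PutVarint(sizeEncoded, size)]
}

func main() {
	b := encodeSize(1 << 20)
	v, _ := binary.Varint(b)
	fmt.Println(len(b), v) // a 1 MiB size round-trips in a handful of bytes: 4 1048576
}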

View File

@ -14,6 +14,7 @@ type namespaceStore struct {
tx *bolt.Tx
}
// NewNamespaceStore returns a store backed by a bolt DB
func NewNamespaceStore(tx *bolt.Tx) namespaces.Store {
return &namespaceStore{tx: tx}
}

View File

@ -15,6 +15,7 @@ import (
"golang.org/x/net/context"
)
// Config for the cgroups monitor
type Config struct {
NoPrometheus bool `toml:"no_prometheus"`
}
@ -28,14 +29,15 @@ func init() {
})
}
// New returns a new cgroups monitor
func New(ic *plugin.InitContext) (interface{}, error) {
var ns *metrics.Namespace
config := ic.Config.(*Config)
if !config.NoPrometheus {
ns = metrics.NewNamespace("container", "", nil)
}
collector := NewCollector(ns)
oom, err := NewOOMCollector(ns)
collector := newCollector(ns)
oom, err := newOOMCollector(ns)
if err != nil {
return nil, err
}
@ -51,8 +53,8 @@ func New(ic *plugin.InitContext) (interface{}, error) {
}
type cgroupsMonitor struct {
collector *Collector
oom *OOMCollector
collector *collector
oom *oomCollector
context context.Context
publisher events.Publisher
}

View File

@ -14,7 +14,9 @@ import (
)
var (
// ErrAlreadyCollected is returned when a cgroup is already being monitored
ErrAlreadyCollected = errors.New("cgroup is already being collected")
// ErrCgroupNotExists is returned when a cgroup no longer exists
ErrCgroupNotExists = errors.New("cgroup does not exist in the collector")
)
@ -22,14 +24,14 @@ var (
// where the event originated from
type Trigger func(string, string, cgroups.Cgroup)
// New registers the Collector with the provided namespace and returns it so
// newCollector registers the collector with the provided namespace and returns it so
// that cgroups can be added for collection
func NewCollector(ns *metrics.Namespace) *Collector {
func newCollector(ns *metrics.Namespace) *collector {
if ns == nil {
return &Collector{}
return &collector{}
}
// add machine cpus and memory info
c := &Collector{
c := &collector{
ns: ns,
cgroups: make(map[string]*task),
}
@ -52,9 +54,9 @@ func taskID(id, namespace string) string {
return fmt.Sprintf("%s-%s", id, namespace)
}
// Collector provides the ability to collect container stats and export
// collector provides the ability to collect container stats and export
// them in the prometheus format
type Collector struct {
type collector struct {
mu sync.RWMutex
cgroups map[string]*task
@ -62,13 +64,13 @@ type Collector struct {
metrics []*metric
}
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
for _, m := range c.metrics {
ch <- m.desc(c.ns)
}
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
func (c *collector) Collect(ch chan<- prometheus.Metric) {
c.mu.RLock()
wg := &sync.WaitGroup{}
for _, t := range c.cgroups {
@ -79,7 +81,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) {
wg.Wait()
}
func (c *Collector) collect(id, namespace string, cg cgroups.Cgroup, ch chan<- prometheus.Metric, wg *sync.WaitGroup) {
func (c *collector) collect(id, namespace string, cg cgroups.Cgroup, ch chan<- prometheus.Metric, wg *sync.WaitGroup) {
defer wg.Done()
stats, err := cg.Stat(cgroups.IgnoreNotExist)
if err != nil {
@ -92,7 +94,7 @@ func (c *Collector) collect(id, namespace string, cg cgroups.Cgroup, ch chan<- p
}
// Add adds the provided cgroup and id so that metrics are collected and exported
func (c *Collector) Add(id, namespace string, cg cgroups.Cgroup) error {
func (c *collector) Add(id, namespace string, cg cgroups.Cgroup) error {
if c.ns == nil {
return nil
}
@ -110,7 +112,7 @@ func (c *Collector) Add(id, namespace string, cg cgroups.Cgroup) error {
}
// Remove removes the provided cgroup by id from the collector
func (c *Collector) Remove(id, namespace string) {
func (c *collector) Remove(id, namespace string) {
if c.ns == nil {
return
}

View File

@ -14,7 +14,7 @@ import (
"github.com/sirupsen/logrus"
)
func NewOOMCollector(ns *metrics.Namespace) (*OOMCollector, error) {
func newOOMCollector(ns *metrics.Namespace) (*oomCollector, error) {
fd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if err != nil {
return nil, err
@ -23,7 +23,7 @@ func NewOOMCollector(ns *metrics.Namespace) (*OOMCollector, error) {
if ns != nil {
desc = ns.NewDesc("memory_oom", "The number of times a container has received an oom event", metrics.Total, "container_id", "namespace")
}
c := &OOMCollector{
c := &oomCollector{
fd: fd,
desc: desc,
set: make(map[uintptr]*oom),
@ -35,7 +35,7 @@ func NewOOMCollector(ns *metrics.Namespace) (*OOMCollector, error) {
return c, nil
}
type OOMCollector struct {
type oomCollector struct {
mu sync.Mutex
desc *prometheus.Desc
@ -51,7 +51,7 @@ type oom struct {
count int64
}
func (o *OOMCollector) Add(id, namespace string, cg cgroups.Cgroup, triggers ...Trigger) error {
func (o *oomCollector) Add(id, namespace string, cg cgroups.Cgroup, triggers ...Trigger) error {
o.mu.Lock()
defer o.mu.Unlock()
fd, err := cg.OOMEventFD()
@ -71,11 +71,11 @@ func (o *OOMCollector) Add(id, namespace string, cg cgroups.Cgroup, triggers ...
return unix.EpollCtl(o.fd, unix.EPOLL_CTL_ADD, int(fd), &event)
}
func (o *OOMCollector) Describe(ch chan<- *prometheus.Desc) {
func (o *oomCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- o.desc
}
func (o *OOMCollector) Collect(ch chan<- prometheus.Metric) {
func (o *oomCollector) Collect(ch chan<- prometheus.Metric) {
o.mu.Lock()
defer o.mu.Unlock()
for _, t := range o.set {
@ -85,11 +85,11 @@ func (o *OOMCollector) Collect(ch chan<- prometheus.Metric) {
}
// Close closes the epoll fd
func (o *OOMCollector) Close() error {
func (o *oomCollector) Close() error {
return unix.Close(int(o.fd))
}
func (o *OOMCollector) start() {
func (o *oomCollector) start() {
var events [128]unix.EpollEvent
for {
n, err := unix.EpollWait(o.fd, events[:], -1)
@ -105,7 +105,7 @@ func (o *OOMCollector) start() {
}
}
func (o *OOMCollector) process(fd uintptr, event uint32) {
func (o *oomCollector) process(fd uintptr, event uint32) {
// make sure to always flush the fd
flush(fd)

View File

@ -13,8 +13,8 @@ type Mount struct {
Options []string
}
// MountAll mounts all the provided mounts to the provided target
func MountAll(mounts []Mount, target string) error {
// All mounts all the provided mounts to the provided target
func All(mounts []Mount, target string) error {
for _, m := range mounts {
if err := m.Mount(target); err != nil {
return err

View File

@ -6,6 +6,7 @@ import (
"golang.org/x/sys/unix"
)
// Mount to the provided target path
func (m *Mount) Mount(target string) error {
flags, data := parseMountOptions(m.Options)
@ -40,6 +41,7 @@ func (m *Mount) Mount(target string) error {
return nil
}
// Unmount the provided mount path with the flags
func Unmount(mount string, flags int) error {
return unix.Unmount(mount, flags)
}

View File

@ -9,7 +9,9 @@ import (
)
const (
// NamespaceEnvVar is the environment variable key name
NamespaceEnvVar = "CONTAINERD_NAMESPACE"
// Default is the name of the default namespace
Default = "default"
)

View File

@ -9,7 +9,8 @@ import (
"github.com/containerd/containerd/log"
)
func NewContext(ctx context.Context, plugins map[PluginType]map[string]interface{}, root, state, id string) *InitContext {
// NewContext returns a new plugin InitContext
func NewContext(ctx context.Context, plugins map[Type]map[string]interface{}, root, state, id string) *InitContext {
return &InitContext{
plugins: plugins,
Root: filepath.Join(root, id),
@ -18,6 +19,7 @@ func NewContext(ctx context.Context, plugins map[PluginType]map[string]interface
}
}
// InitContext is used for plugin initialization
type InitContext struct {
Root string
State string
@ -26,17 +28,19 @@ type InitContext struct {
Config interface{}
Events *events.Exchange
plugins map[PluginType]map[string]interface{}
plugins map[Type]map[string]interface{}
}
func (i *InitContext) Get(t PluginType) (interface{}, error) {
// Get returns the first plugin by its type
func (i *InitContext) Get(t Type) (interface{}, error) {
for _, v := range i.plugins[t] {
return v, nil
}
return nil, fmt.Errorf("no plugins registered for %s", t)
}
func (i *InitContext) GetAll(t PluginType) (map[string]interface{}, error) {
// GetAll returns all plugins with the specific type
func (i *InitContext) GetAll(t Type) (map[string]interface{}, error) {
p, ok := i.plugins[t]
if !ok {
return nil, fmt.Errorf("no plugins registered for %s", t)

View File

@ -6,6 +6,7 @@ import (
"golang.org/x/net/context"
)
// Differ allows the apply and creation of filesystem diffs between mounts
type Differ interface {
Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount) (ocispec.Descriptor, error)
DiffMounts(ctx context.Context, lower, upper []mount.Mount, media, ref string) (ocispec.Descriptor, error)

View File

@ -9,49 +9,62 @@ import (
)
var (
ErrNoPluginType = errors.New("plugin: no type")
// ErrNoType is returned when no type is specified
ErrNoType = errors.New("plugin: no type")
// ErrNoPluginID is returned when no id is specified
ErrNoPluginID = errors.New("plugin: no id")
// SkipPlugin is used when a plugin is not initialized and should not be loaded,
// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded;
// this allows the plugin loader to differentiate between a plugin which is configured
// not to load and one that fails to load.
SkipPlugin = errors.New("skip plugin")
ErrSkipPlugin = errors.New("skip plugin")
)
// IsSkipPlugin returns true if the error indicates that the plugin should be skipped
func IsSkipPlugin(err error) bool {
if errors.Cause(err) == SkipPlugin {
if errors.Cause(err) == ErrSkipPlugin {
return true
}
return false
}
type PluginType string
// Type is the type of the plugin
type Type string
const (
RuntimePlugin PluginType = "io.containerd.runtime.v1"
GRPCPlugin PluginType = "io.containerd.grpc.v1"
SnapshotPlugin PluginType = "io.containerd.snapshotter.v1"
TaskMonitorPlugin PluginType = "io.containerd.monitor.v1"
DiffPlugin PluginType = "io.containerd.differ.v1"
MetadataPlugin PluginType = "io.containerd.metadata.v1"
ContentPlugin PluginType = "io.containerd.content.v1"
// RuntimePlugin implements a runtime
RuntimePlugin Type = "io.containerd.runtime.v1"
// GRPCPlugin implements a grpc service
GRPCPlugin Type = "io.containerd.grpc.v1"
// SnapshotPlugin implements a snapshotter
SnapshotPlugin Type = "io.containerd.snapshotter.v1"
// TaskMonitorPlugin implements a task monitor
TaskMonitorPlugin Type = "io.containerd.monitor.v1"
// DiffPlugin implements a differ
DiffPlugin Type = "io.containerd.differ.v1"
// MetadataPlugin implements a metadata store
MetadataPlugin Type = "io.containerd.metadata.v1"
// ContentPlugin implements a content store
ContentPlugin Type = "io.containerd.content.v1"
)
// Registration contains information for registering a plugin
type Registration struct {
Type PluginType
Type Type
ID string
Config interface{}
Requires []PluginType
Requires []Type
Init func(*InitContext) (interface{}, error)
added bool
}
// URI returns the full plugin URI
func (r *Registration) URI() string {
return fmt.Sprintf("%s.%s", r.Type, r.ID)
}
// Service allows GRPC services to be registered with the underlying server
type Service interface {
Register(*grpc.Server) error
}
@ -75,11 +88,12 @@ func Load(path string) (err error) {
return loadPlugins(path)
}
// Register allows plugins to register
func Register(r *Registration) {
register.Lock()
defer register.Unlock()
if r.Type == "" {
panic(ErrNoPluginType)
panic(ErrNoType)
}
if r.ID == "" {
panic(ErrNoPluginID)
@ -87,6 +101,7 @@ func Register(r *Registration) {
register.r = append(register.r, r)
}
// Graph returns an ordered list of registered plugins for initialization
func Graph() (ordered []*Registration) {
for _, r := range register.r {
children(r.Requires, &ordered)
@ -98,7 +113,7 @@ func Graph() (ordered []*Registration) {
return ordered
}
func children(types []PluginType, ordered *[]*Registration) {
func children(types []Type, ordered *[]*Registration) {
for _, t := range types {
for _, r := range register.r {
if r.Type == t {

View File

@ -16,6 +16,7 @@ type Bar float64
var _ fmt.Formatter = Bar(1.0)
// Format the progress bar as output
func (h Bar) Format(state fmt.State, r rune) {
switch r {
case 'r':

View File

@ -10,16 +10,20 @@ import (
// Bytes converts a regular int64 to human readable type.
type Bytes int64
// String returns the string representation of bytes
func (b Bytes) String() string {
return units.CustomSize("%02.1f %s", float64(b), 1024.0, []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"})
}
// BytesPerSecond is the rate in seconds for byte operations
type BytesPerSecond int64
// NewBytesPerSecond returns the rate that n bytes were written in the provided duration
func NewBytesPerSecond(n int64, duration time.Duration) BytesPerSecond {
return BytesPerSecond(float64(n) / duration.Seconds())
}
// String returns the string representation of the rate
func (bps BytesPerSecond) String() string {
return fmt.Sprintf("%v/s", Bytes(bps))
}
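A short usage sketch (not part of this commit), assuming the progress package import path of this era; the byte counts are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/progress"
)

func main() {
	written := progress.Bytes(3 * 1024 * 1024) // 3 MiB transferred
	rate := progress.NewBytesPerSecond(int64(written), 2*time.Second)
	fmt.Println(written) // human readable size, e.g. "3.0 MiB"
	fmt.Println(rate)    // transfer rate, e.g. "1.5 MiB/s"
}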

View File

@ -16,12 +16,14 @@ type Writer struct {
lines int
}
// NewWriter returns a writer
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
}
}
// Write the provided bytes
func (w *Writer) Write(p []byte) (n int, err error) {
return w.buf.Write(p)
}

View File

@ -5,6 +5,7 @@ import (
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)
// FieldpathEnabled returns true if E_Fieldpath is enabled
func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false))
}

View File

@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
)
// ErrNoSuchProcess is returned when the process no longer exists
var ErrNoSuchProcess = errors.New("no such process")
const bufferSize = 2048
@ -36,10 +37,12 @@ func Reap() error {
return err
}
// Default is the default monitor initialized for the package
var Default = &Monitor{
subscribers: make(map[chan runc.Exit]struct{}),
}
// Monitor monitors the underlying system for process status changes
type Monitor struct {
sync.Mutex
@ -73,6 +76,7 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) {
return -1, ErrNoSuchProcess
}
// Subscribe to process exit changes
func (m *Monitor) Subscribe() chan runc.Exit {
c := make(chan runc.Exit, bufferSize)
m.Lock()
@ -81,6 +85,7 @@ func (m *Monitor) Subscribe() chan runc.Exit {
return c
}
// Unsubscribe from process exit changes
func (m *Monitor) Unsubscribe(c chan runc.Exit) {
m.Lock()
delete(m.subscribers, c)

View File

@ -12,8 +12,11 @@ import (
)
var (
// ErrInvalid is returned when there is an invalid reference
ErrInvalid = errors.New("invalid reference")
// ErrObjectRequired is returned when the object is required
ErrObjectRequired = errors.New("object required")
// ErrHostnameRequired is returned when the hostname is required
ErrHostnameRequired = errors.New("hostname required")
)
@ -138,7 +141,6 @@ func SplitObject(obj string) (tag string, dgst digest.Digest) {
parts := strings.SplitAfterN(obj, "@", 2)
if len(parts) < 2 {
return parts[0], ""
} else {
}
return parts[0], digest.Digest(parts[1])
}
}

View File

@ -47,6 +47,7 @@ type Converter struct {
layerBlobs map[digest.Digest]ocispec.Descriptor
}
// NewConverter returns a new converter
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
return &Converter{
contentStore: contentStore,
@ -56,6 +57,7 @@ func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converte
}
}
// Handle fetching descriptors for a docker media type
func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema1Manifest:
@ -101,6 +103,7 @@ func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocis
}
}
// Convert a docker manifest to an OCI descriptor
func (c *Converter) Convert(ctx context.Context) (ocispec.Descriptor, error) {
history, diffIDs, err := c.schema1ManifestHistory()
if err != nil {

View File

@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
)
// Status of a content operation
type Status struct {
content.Status
@ -15,6 +16,7 @@ type Status struct {
UploadUUID string
}
// StatusTracker to track status of operations
type StatusTracker interface {
GetStatus(string) (Status, error)
SetStatus(string, Status)
@ -25,6 +27,7 @@ type memoryStatusTracker struct {
m sync.Mutex
}
// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory
func NewInMemoryTracker() StatusTracker {
return &memoryStatusTracker{
statuses: map[string]Status{},

View File

@ -32,11 +32,13 @@ type Resolver interface {
Pusher(ctx context.Context, ref string) (Pusher, error)
}
// Fetcher fetches content
type Fetcher interface {
// Fetch the resource identified by the descriptor.
Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
}
// Pusher pushes content
type Pusher interface {
// Push returns a content writer for the given resource identified
// by the descriptor.
@ -47,6 +49,7 @@ type Pusher interface {
// function.
type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
// Fetch content
func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
return fn(ctx, desc)
}
@ -55,6 +58,7 @@ func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.Re
// function.
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error
func (fn PusherFunc) Pusher(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
// Push content
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
return fn(ctx, desc, r)
}
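A minimal sketch (not part of this commit) of the adapter types documented above; the stub fetch body stands in for a real registry request:

package main

import (
	"context"
	"io"
	"io/ioutil"
	"strings"

	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// stubFetcher adapts a plain function into a remotes.Fetcher; a real fetcher
// would resolve desc against a registry instead of returning canned bytes.
var stubFetcher remotes.Fetcher = remotes.FetcherFunc(func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	return ioutil.NopCloser(strings.NewReader("blob for " + desc.Digest.String())), nil
})

func fetchExample(ctx context.Context, desc ocispec.Descriptor) ([]byte, error) {
	rc, err := stubFetcher.Fetch(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}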

View File

@ -19,11 +19,13 @@ var (
type initializerFunc func(string) error
// Mounter handles mount and unmount
type Mounter interface {
Mount(target string, mounts ...mount.Mount) error
Unmount(target string) error
}
// InitRootFS initializes the snapshot for use as a rootfs
func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
_, err := snapshotter.Stat(ctx, name)
if err == nil {

View File

@ -1,15 +1,26 @@
package runtime
const (
// TaskCreateEventTopic for task create
TaskCreateEventTopic = "/tasks/create"
// TaskStartEventTopic for task start
TaskStartEventTopic = "/tasks/start"
// TaskOOMEventTopic for task oom
TaskOOMEventTopic = "/tasks/oom"
// TaskExitEventTopic for task exit
TaskExitEventTopic = "/tasks/exit"
// TaskDeleteEventTopic for task delete
TaskDeleteEventTopic = "/tasks/delete"
// TaskExecAddedEventTopic for task exec create
TaskExecAddedEventTopic = "/tasks/exec-added"
// TaskExecStartedEventTopic for task exec start
TaskExecStartedEventTopic = "/tasks/exec-started"
// TaskPausedEventTopic for task pause
TaskPausedEventTopic = "/tasks/paused"
// TaskResumedEventTopic for task resume
TaskResumedEventTopic = "/tasks/resumed"
// TaskCheckpointedEventTopic for task checkpoint
TaskCheckpointedEventTopic = "/tasks/checkpointed"
// TaskUnknownTopic for unknown task events
TaskUnknownTopic = "/tasks/?"
)

View File

@ -8,12 +8,14 @@ type TaskMonitor interface {
Stop(Task) error
}
// NewMultiTaskMonitor returns a new TaskMonitor broadcasting to the provided monitors
func NewMultiTaskMonitor(monitors ...TaskMonitor) TaskMonitor {
return &multiTaskMonitor{
monitors: monitors,
}
}
// NewNoopMonitor returns a task monitor that does nothing
func NewNoopMonitor() TaskMonitor {
return &noopTaskMonitor{}
}

View File

@ -8,6 +8,7 @@ import (
"github.com/gogo/protobuf/types"
)
// IO holds process IO information
type IO struct {
Stdin string
Stdout string
@ -15,6 +16,7 @@ type IO struct {
Terminal bool
}
// CreateOpts contains task creation data
type CreateOpts struct {
// Spec is the OCI runtime spec
Spec *types.Any
@ -28,6 +30,7 @@ type CreateOpts struct {
Options *types.Any
}
// Exit information for a process
type Exit struct {
Pid uint32
Status uint32

View File

@ -63,7 +63,7 @@ func New(ctx context.Context, config *Config) (*Server, error) {
rpc: rpc,
events: events.NewExchange(),
}
initialized = make(map[plugin.PluginType]map[string]interface{})
initialized = make(map[plugin.Type]map[string]interface{})
)
for _, p := range plugins {
id := p.URI()

View File

@ -20,7 +20,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "containers",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.MetadataPlugin,
},
Init: func(ic *plugin.InitContext) (interface{}, error) {

View File

@ -39,7 +39,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "content",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.ContentPlugin,
plugin.MetadataPlugin,
},

View File

@ -26,7 +26,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "diff",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.DiffPlugin,
},
Config: &config{

View File

@ -20,7 +20,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "images",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.MetadataPlugin,
},
Init: func(ic *plugin.InitContext) (interface{}, error) {

View File

@ -21,7 +21,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "namespaces",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.MetadataPlugin,
},
Init: func(ic *plugin.InitContext) (interface{}, error) {

View File

@ -24,7 +24,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "snapshots",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.SnapshotPlugin,
plugin.MetadataPlugin,
},

View File

@ -41,7 +41,7 @@ func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "tasks",
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.RuntimePlugin,
plugin.MetadataPlugin,
plugin.ContentPlugin,

View File

@ -108,7 +108,7 @@ func TestBtrfsMounts(t *testing.T) {
if err := os.MkdirAll(target, 0755); err != nil {
t.Fatal(err)
}
if err := mount.MountAll(mounts, target); err != nil {
if err := mount.All(mounts, target); err != nil {
t.Fatal(err)
}
defer testutil.Unmount(t, target)
@ -138,7 +138,7 @@ func TestBtrfsMounts(t *testing.T) {
t.Fatal(err)
}
if err := mount.MountAll(mounts, target); err != nil {
if err := mount.All(mounts, target); err != nil {
t.Fatal(err)
}
defer testutil.Unmount(t, target)

View File

@ -221,7 +221,7 @@ func TestOverlayOverlayRead(t *testing.T) {
t.Error(err)
return
}
if err := mount.MountAll(mounts, dest); err != nil {
if err := mount.All(mounts, dest); err != nil {
t.Error(err)
return
}

View File

@ -146,7 +146,7 @@ func (u *Usage) Add(other Usage) {
// the active snapshot. Mount this to the temporary location with the
// following:
//
// if err := containerd.MountAll(mounts, tmpDir); err != nil { ... }
// if err := mount.All(mounts, tmpDir); err != nil { ... }
//
// Once the mounts are performed, our temporary location is ready to capture
// a diff. In practice, this works similar to a filesystem transaction. The

View File

@ -20,7 +20,7 @@ func applyToMounts(m []mount.Mount, work string, a fstest.Applier) (err error) {
}
defer os.RemoveAll(td)
if err := mount.MountAll(m, td); err != nil {
if err := mount.All(m, td); err != nil {
return errors.Wrap(err, "failed to mount")
}
defer func() {
@ -76,7 +76,7 @@ func checkSnapshot(ctx context.Context, sn snapshot.Snapshotter, work, name, che
}
}()
if err := mount.MountAll(m, td); err != nil {
if err := mount.All(m, td); err != nil {
return errors.Wrap(err, "failed to mount")
}
defer func() {

View File

@ -119,7 +119,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
t.Fatal("expected mounts to have entries")
}
if err := mount.MountAll(mounts, preparing); err != nil {
if err := mount.All(mounts, preparing); err != nil {
t.Fatalf("failure reason: %+v", err)
}
defer testutil.Unmount(t, preparing)
@ -150,7 +150,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
if err != nil {
t.Fatalf("failure reason: %+v", err)
}
if err := mount.MountAll(mounts, next); err != nil {
if err := mount.All(mounts, next); err != nil {
t.Fatalf("failure reason: %+v", err)
}
defer testutil.Unmount(t, next)
@ -212,7 +212,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
if err != nil {
t.Fatalf("failure reason: %+v", err)
}
if err := mount.MountAll(mounts, nextnext); err != nil {
if err := mount.All(mounts, nextnext); err != nil {
t.Fatalf("failure reason: %+v", err)
}
@ -245,7 +245,7 @@ func checkSnapshotterStatActive(ctx context.Context, t *testing.T, snapshotter s
t.Fatal("expected mounts to have entries")
}
if err = mount.MountAll(mounts, preparing); err != nil {
if err = mount.All(mounts, preparing); err != nil {
t.Fatal(err)
}
defer testutil.Unmount(t, preparing)
@ -279,7 +279,7 @@ func checkSnapshotterStatCommitted(ctx context.Context, t *testing.T, snapshotte
t.Fatal("expected mounts to have entries")
}
if err = mount.MountAll(mounts, preparing); err != nil {
if err = mount.All(mounts, preparing); err != nil {
t.Fatal(err)
}
defer testutil.Unmount(t, preparing)
@ -318,7 +318,7 @@ func snapshotterPrepareMount(ctx context.Context, snapshotter snapshot.Snapshott
return "", fmt.Errorf("expected mounts to have entries")
}
if err = mount.MountAll(mounts, preparing); err != nil {
if err = mount.All(mounts, preparing); err != nil {
return "", err
}
return preparing, nil
@ -748,7 +748,7 @@ func checkSnapshotterViewReadonly(ctx context.Context, t *testing.T, snapshotter
}
// Just checking the option string of m is not enough, we need to test real mount. (#1368)
if err := mount.MountAll(m, viewMountPoint); err != nil {
if err := mount.All(m, viewMountPoint); err != nil {
t.Fatal(err)
}

View File

@ -43,7 +43,7 @@ func init() {
ID: runtimeName,
Type: plugin.RuntimePlugin,
Init: New,
Requires: []plugin.PluginType{
Requires: []plugin.Type{
plugin.MetadataPlugin,
},
})