Merge pull request #113 from abhinandanpb/client

Replacing the containerd gRPC API with the containerd client
Lantao Liu 2017-08-16 16:45:04 -07:00 committed by GitHub
commit 1ae4ee8325
170 changed files with 5819 additions and 7262 deletions
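
This change swaps the per-service gRPC stubs (containers, tasks, events, version, health) for a single containerd Go client held by criContainerdService. Below is a minimal sketch of what that client setup looks like under the client API of this containerd revision; the socket path and the "k8s.io" namespace are illustrative and not taken from this PR.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// One client replaces the old containerService/taskService/healthService stubs.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatalf("failed to connect to containerd: %v", err)
	}
	defer client.Close()

	// Client operations run inside a containerd namespace (illustrative name).
	ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

	// Readiness check used by the rewritten Status handler further down.
	serving, err := client.IsServing(ctx)
	if err != nil {
		log.Fatalf("failed to check containerd readiness: %v", err)
	}
	log.Printf("containerd serving: %v", serving)
}
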

View File

@ -1,4 +1,4 @@
RUNC_VERSION=e775f0fba3ea329b8b766451c892c41a3d49594d
CNI_VERSION=v0.4.0
CONTAINERD_VERSION=2386062ce152d6f158d22be5991fe11c7cf67535
CONTAINERD_VERSION=938810e706bbcdbcb937ce63ba3e7c9ca329af64
CRITEST_VERSION=74bbd4e142f752f13c648d9dde23defed3e472a2

View File

@ -17,14 +17,11 @@ limitations under the License.
package server
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
prototypes "github.com/gogo/protobuf/types"
"github.com/golang/glog"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/devices"
@ -94,29 +91,15 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
if err != nil {
return nil, fmt.Errorf("failed to generate container %q spec: %v", id, err)
}
rawSpec, err := json.Marshal(spec)
if err != nil {
return nil, fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
}
glog.V(4).Infof("Container spec: %+v", spec)
var opts []containerd.NewContainerOpts
// Prepare container rootfs.
if config.GetLinux().GetSecurityContext().GetReadonlyRootfs() {
if _, err := c.snapshotService.View(ctx, id, image.ChainID); err != nil {
return nil, fmt.Errorf("failed to view container rootfs %q: %v", image.ChainID, err)
}
opts = append(opts, containerd.WithNewSnapshotView(id, image.Image))
} else {
if _, err := c.snapshotService.Prepare(ctx, id, image.ChainID); err != nil {
return nil, fmt.Errorf("failed to prepare container rootfs %q: %v", image.ChainID, err)
}
opts = append(opts, containerd.WithNewSnapshot(id, image.Image))
}
defer func() {
if retErr != nil {
if err := c.snapshotService.Remove(ctx, id); err != nil {
glog.Errorf("Failed to remove container snapshot %q: %v", id, err)
}
}
}()
meta.ImageRef = image.ID
// Create container root directory.
@ -135,29 +118,22 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
}
}()
// Create containerd container.
if _, err = c.containerService.Create(ctx, containers.Container{
ID: id,
// TODO(random-liu): Checkpoint metadata into container labels.
Image: image.ID,
Runtime: containers.RuntimeInfo{Name: defaultRuntime},
Spec: &prototypes.Any{
TypeUrl: runtimespec.Version,
Value: rawSpec,
},
RootFS: id,
}); err != nil {
opts = append(opts, containerd.WithSpec(spec), containerd.WithRuntime(defaultRuntime))
var cntr containerd.Container
if cntr, err = c.client.NewContainer(ctx, id, opts...); err != nil {
return nil, fmt.Errorf("failed to create containerd container: %v", err)
}
defer func() {
if retErr != nil {
if err := c.containerService.Delete(ctx, id); err != nil {
if err := cntr.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
glog.Errorf("Failed to delete containerd container %q: %v", id, err)
}
}
}()
container, err := containerstore.NewContainer(meta, containerstore.Status{CreatedAt: time.Now().UnixNano()})
container, err := containerstore.NewContainer(meta,
containerstore.Status{CreatedAt: time.Now().UnixNano()},
containerstore.WithContainer(cntr))
if err != nil {
return nil, fmt.Errorf("failed to create internal container object for %q: %v",
id, err)
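
Condensed, the new create path builds a slice of containerd.NewContainerOpts (snapshot, spec, runtime) and lets the client create the container; cleanup collapses into cntr.Delete with WithSnapshotCleanup. A sketch under the same API, where the OCI spec, image, and runtime name are assumed to be supplied by the caller:

package server

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

func createContainer(ctx context.Context, client *containerd.Client, id, runtimeName string,
	spec *runtimespec.Spec, image containerd.Image, readonlyRootfs bool) (containerd.Container, error) {
	opts := []containerd.NewContainerOpts{
		containerd.WithSpec(spec),
		containerd.WithRuntime(runtimeName),
	}
	// A read-only rootfs gets a snapshot view; otherwise a writable snapshot.
	if readonlyRootfs {
		opts = append(opts, containerd.WithNewSnapshotView(id, image))
	} else {
		opts = append(opts, containerd.WithNewSnapshot(id, image))
	}
	cntr, err := client.NewContainer(ctx, id, opts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create containerd container: %v", err)
	}
	return cntr, nil
}
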

View File

@ -19,6 +19,7 @@ package server
import (
"fmt"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/golang/glog"
"golang.org/x/net/context"
@ -68,14 +69,6 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
// kubelet implementation, we'll never start a container once we decide to remove it,
// so we don't need the "Dead" state for now.
// Remove container snapshot.
if err := c.snapshotService.Remove(ctx, id); err != nil {
if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to remove container snapshot %q: %v", id, err)
}
glog.V(5).Infof("Remove called for snapshot %q that does not exist", id)
}
containerRootDir := getContainerRootDir(c.rootDir, id)
if err := c.os.RemoveAll(containerRootDir); err != nil {
return nil, fmt.Errorf("failed to remove container root directory %q: %v",
@ -88,8 +81,8 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
}
// Delete containerd container.
if err := c.containerService.Delete(ctx, id); err != nil {
if !isContainerdGRPCNotFoundError(err) {
if err := container.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete containerd container %q: %v", id, err)
}
glog.V(5).Infof("Remove called for containerd container %q that does not exist", id, err)

View File

@ -17,15 +17,13 @@ limitations under the License.
package server
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@ -54,7 +52,7 @@ func (c *criContainerdService) StartContainer(ctx context.Context, r *runtime.St
if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
// Always apply status change no matter startContainer fails or not. Because startContainer
// may change container state no matter it fails or succeeds.
startErr = c.startContainer(ctx, id, container.Metadata, &status)
startErr = c.startContainer(ctx, container.Container, container.Metadata, &status)
return status, nil
}); startErr != nil {
return nil, startErr
@ -66,13 +64,17 @@ func (c *criContainerdService) StartContainer(ctx context.Context, r *runtime.St
// startContainer actually starts the container. The function needs to be run in one transaction. Any updates
// to the status passed in will be applied no matter the function returns error or not.
func (c *criContainerdService) startContainer(ctx context.Context, id string, meta containerstore.Metadata, status *containerstore.Status) (retErr error) {
func (c *criContainerdService) startContainer(ctx context.Context,
container containerd.Container,
meta containerstore.Metadata,
status *containerstore.Status) (retErr error) {
config := meta.Config
id := container.ID()
// Return error if container is not in created state.
if status.State() != runtime.ContainerState_CONTAINER_CREATED {
return fmt.Errorf("container %q is in %s state", id, criContainerStateToString(status.State()))
}
// Do not start the container when there is a removal in progress.
if status.Removing {
return fmt.Errorf("container %q is in removing state", id)
@ -97,102 +99,65 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
sandboxConfig := sandbox.Config
sandboxID := meta.SandboxID
// Make sure sandbox is running.
sandboxInfo, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: sandboxID})
s, err := sandbox.Container.Task(ctx, nil)
if err != nil {
return fmt.Errorf("failed to get sandbox container %q info: %v", sandboxID, err)
}
// This is only a best effort check, sandbox may still exit after this. If sandbox fails
// before starting the container, the start will fail.
if sandboxInfo.Task.Status != task.StatusRunning {
taskStatus, err := s.Status(ctx)
if err != nil {
return fmt.Errorf("failed to get task status for sandbox container %q: %v", id, err)
}
if taskStatus.Status != containerd.Running {
return fmt.Errorf("sandbox container %q is not running", sandboxID)
}
containerRootDir := getContainerRootDir(c.rootDir, id)
stdin, stdout, stderr := getStreamingPipes(containerRootDir)
// Set stdin to empty if Stdin == false.
if !config.GetStdin() {
stdin = ""
}
stdinPipe, stdoutPipe, stderrPipe, err := c.prepareStreamingPipes(ctx, stdin, stdout, stderr)
if err != nil {
return fmt.Errorf("failed to prepare streaming pipes: %v", err)
}
defer func() {
if retErr != nil {
if stdinPipe != nil {
stdinPipe.Close()
}
stdoutPipe.Close()
stderrPipe.Close()
}
}()
// Redirect the stream to std for now.
// TODO(random-liu): [P1] Support StdinOnce after container logging is added.
if stdinPipe != nil {
go func(w io.WriteCloser) {
io.Copy(w, os.Stdin) // nolint: errcheck
w.Close()
}(stdinPipe)
}
rStdoutPipe, wStdoutPipe := io.Pipe()
rStderrPipe, wStderrPipe := io.Pipe()
stdin := new(bytes.Buffer)
defer func() {
if retErr != nil {
rStdoutPipe.Close()
rStderrPipe.Close()
}
}()
if config.GetLogPath() != "" {
// Only generate container log when log path is specified.
logPath := filepath.Join(sandboxConfig.GetLogDirectory(), config.GetLogPath())
if err = c.agentFactory.NewContainerLogger(logPath, agents.Stdout, stdoutPipe).Start(); err != nil {
if err = c.agentFactory.NewContainerLogger(logPath, agents.Stdout, rStdoutPipe).Start(); err != nil {
return fmt.Errorf("failed to start container stdout logger: %v", err)
}
// Only redirect stderr when there is no tty.
if !config.GetTty() {
if err = c.agentFactory.NewContainerLogger(logPath, agents.Stderr, stderrPipe).Start(); err != nil {
if err = c.agentFactory.NewContainerLogger(logPath, agents.Stderr, rStderrPipe).Start(); err != nil {
return fmt.Errorf("failed to start container stderr logger: %v", err)
}
}
}
// Get rootfs mounts.
rootfsMounts, err := c.snapshotService.Mounts(ctx, id)
if err != nil {
return fmt.Errorf("failed to get rootfs mounts %q: %v", id, err)
}
var rootfs []*types.Mount
for _, m := range rootfsMounts {
rootfs = append(rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
})
}
// Create containerd task.
createOpts := &tasks.CreateTaskRequest{
ContainerID: id,
Rootfs: rootfs,
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
Terminal: config.GetTty(),
}
glog.V(5).Infof("Create containerd task (id=%q, name=%q) with options %+v.",
id, meta.Name, createOpts)
createResp, err := c.taskService.Create(ctx, createOpts)
//TODO(Abhi): close stdin/pass a managed IOCreation
task, err := container.NewTask(ctx, containerd.NewIO(stdin, wStdoutPipe, wStderrPipe))
if err != nil {
return fmt.Errorf("failed to create containerd task: %v", err)
}
defer func() {
if retErr != nil {
// Cleanup the containerd task if an error is returned.
if _, err := c.taskService.Delete(ctx, &tasks.DeleteTaskRequest{ContainerID: id}); err != nil {
if _, err := task.Delete(ctx, containerd.WithProcessKill); err != nil {
glog.Errorf("Failed to delete containerd task %q: %v", id, err)
}
}
}()
// Start containerd task.
if _, err := c.taskService.Start(ctx, &tasks.StartTaskRequest{ContainerID: id}); err != nil {
if err := task.Start(ctx); err != nil {
return fmt.Errorf("failed to start containerd task %q: %v", id, err)
}
// Update container start timestamp.
status.Pid = createResp.Pid
status.Pid = task.Pid()
status.StartedAt = time.Now().UnixNano()
return nil
}
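
The task lifecycle above reduces to three client calls: NewTask with client-managed IO, Start, and, on failure, Delete with WithProcessKill. A sketch of that shape, assuming the stdout/stderr writers are already wired to the CRI logging agents as in the hunk above:

package server

import (
	"context"
	"fmt"
	"io"

	"github.com/containerd/containerd"
	"github.com/golang/glog"
)

func startTask(ctx context.Context, cntr containerd.Container,
	stdin io.Reader, stdout, stderr io.Writer) (_ containerd.Task, retErr error) {
	task, err := cntr.NewTask(ctx, containerd.NewIO(stdin, stdout, stderr))
	if err != nil {
		return nil, fmt.Errorf("failed to create containerd task: %v", err)
	}
	defer func() {
		if retErr != nil {
			// Roll back the task; WithProcessKill also stops it if it is running.
			if _, err := task.Delete(ctx, containerd.WithProcessKill); err != nil {
				glog.Errorf("Failed to delete containerd task: %v", err)
			}
		}
	}()
	if err := task.Start(ctx); err != nil {
		return nil, fmt.Errorf("failed to start containerd task: %v", err)
	}
	glog.V(4).Infof("Started containerd task with pid %d", task.Pid())
	return task, nil
}
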

View File

@ -20,7 +20,7 @@ import (
"fmt"
"time"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/errdefs"
"github.com/docker/docker/pkg/signal"
"github.com/golang/glog"
"golang.org/x/net/context"
@ -94,16 +94,20 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
}
}
glog.V(2).Infof("Stop container %q with signal %v", id, stopSignal)
_, err = c.taskService.Kill(ctx, &tasks.KillRequest{
ContainerID: id,
Signal: uint32(stopSignal),
All: true,
})
task, err := container.Container.Task(ctx, nil)
if err != nil {
if !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
return fmt.Errorf("failed to stop container %q: %v", id, err)
if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container, task not found for container %q: %v", id, err)
}
return nil
}
if task != nil {
if err = task.Kill(ctx, stopSignal); err != nil {
if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container %q: %v", id, err)
}
// Move on to make sure container status is updated.
}
// Move on to make sure container status is updated.
}
err = c.waitContainerStop(ctx, id, timeout)
@ -113,18 +117,22 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
glog.Errorf("Stop container %q timed out: %v", id, err)
}
task, err := container.Container.Task(ctx, nil)
if err != nil {
if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stop container, task not found for container %q: %v", id, err)
}
return nil
}
// Event handler will Delete the container from containerd after it handles the Exited event.
glog.V(2).Infof("Kill container %q", id)
_, err := c.taskService.Kill(ctx, &tasks.KillRequest{
ContainerID: id,
Signal: uint32(unix.SIGKILL),
All: true,
})
if err != nil {
if !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
return fmt.Errorf("failed to kill container %q: %v", id, err)
if task != nil {
if err = task.Kill(ctx, unix.SIGKILL); err != nil {
if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to kill container %q: %v", id, err)
}
// Move on to make sure container status is updated.
}
// Move on to make sure container status is updated.
}
// Wait for a fixed timeout until container stop is observed by event monitor.
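
Both the graceful stop and the SIGKILL fallback above follow the same shape: look the task up through the container object and treat a not-found error as "already exited". A condensed sketch of that shape; the function name is illustrative:

package server

import (
	"context"
	"fmt"
	"syscall"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/errdefs"
)

func killTask(ctx context.Context, cntr containerd.Container, sig syscall.Signal) error {
	task, err := cntr.Task(ctx, nil)
	if err != nil {
		if errdefs.IsNotFound(err) {
			// No task means the container already exited; nothing to kill.
			return nil
		}
		return fmt.Errorf("failed to get task: %v", err)
	}
	if err := task.Kill(ctx, sig); err != nil && !errdefs.IsNotFound(err) {
		return fmt.Errorf("failed to kill task: %v", err)
	}
	return nil
}

For example, the SIGKILL path after the stop timeout corresponds to killTask(ctx, container.Container, unix.SIGKILL), moving on to update container status even when the task has already gone away.
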

View File

@ -20,7 +20,7 @@ import (
"time"
"github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/typeurl"
"github.com/golang/glog"
"github.com/jpillora/backoff"
@ -104,13 +104,22 @@ func (c *criContainerdService) handleEvent(evt *events.Envelope) {
// Non-init process died, ignore the event.
return
}
// Delete the container from containerd.
_, err = c.taskService.Delete(context.Background(), &tasks.DeleteTaskRequest{ContainerID: e.ContainerID})
// TODO(random-liu): Change isContainerdGRPCNotFoundError to use errdefs.
if err != nil && !isContainerdGRPCNotFoundError(err) {
// TODO(random-liu): [P0] Enqueue the event and retry.
glog.Errorf("Failed to delete container %q: %v", e.ContainerID, err)
return
task, err := cntr.Container.Task(context.Background(), nil)
if err != nil {
if !errdefs.IsNotFound(err) {
glog.Errorf("failed to stop container, task not found for container %q: %v", e.ContainerID, err)
return
}
}
if task != nil {
if _, err = task.Delete(context.Background()); err != nil {
if !errdefs.IsNotFound(err) {
// TODO(random-liu): [P0] Enqueue the event and retry.
glog.Errorf("failed to stop container %q: %v", e.ContainerID, err)
return
}
// Move on to make sure container status is updated.
}
}
err = cntr.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
// If FinishedAt has been set (e.g. with start failure), keep as

View File

@ -26,6 +26,7 @@ import (
"strings"
"syscall"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/stringid"
@ -33,8 +34,6 @@ import (
"github.com/opencontainers/image-spec/identity"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"github.com/kubernetes-incubator/cri-containerd/pkg/store"
@ -78,12 +77,6 @@ const (
// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
// "The search list is currently limited to six domains with a total of 256 characters."
maxDNSSearches = 6
// stdinNamedPipe is the name of stdin named pipe.
stdinNamedPipe = "stdin"
// stdoutNamedPipe is the name of stdout named pipe.
stdoutNamedPipe = "stdout"
// stderrNamedPipe is the name of stderr named pipe.
stderrNamedPipe = "stderr"
// Delimiter used to construct container/sandbox names.
nameDelimiter = "_"
// netNSFormat is the format of network namespace of a process.
@ -148,15 +141,6 @@ func getContainerRootDir(rootDir, id string) string {
return filepath.Join(rootDir, containersDir, id)
}
// getStreamingPipes returns the stdin/stdout/stderr pipes path in the
// container/sandbox root.
func getStreamingPipes(rootDir string) (string, string, string) {
stdin := filepath.Join(rootDir, stdinNamedPipe)
stdout := filepath.Join(rootDir, stdoutNamedPipe)
stderr := filepath.Join(rootDir, stderrNamedPipe)
return stdin, stdout, stderr
}
// getSandboxHosts returns the hosts file path inside the sandbox root directory.
func getSandboxHosts(sandboxRootDir string) string {
return filepath.Join(sandboxRootDir, "hosts")
@ -223,18 +207,6 @@ func getPIDNamespace(pid uint32) string {
return fmt.Sprintf(pidNSFormat, pid)
}
// isContainerdGRPCNotFoundError checks whether a grpc error is not found error.
func isContainerdGRPCNotFoundError(grpcError error) bool {
return grpc.Code(grpcError) == codes.NotFound
}
// isRuncProcessAlreadyFinishedError checks whether a grpc error is a process already
// finished error.
// TODO(random-liu): Containerd should expose this error in api. (containerd#999)
func isRuncProcessAlreadyFinishedError(grpcError error) bool {
return strings.Contains(grpc.ErrorDesc(grpcError), "os: process already finished")
}
// criContainerStateToString formats CRI container state to string.
func criContainerStateToString(state runtime.ContainerState) string {
return runtime.ContainerState_name[int32(state)]
@ -269,46 +241,48 @@ func normalizeImageRef(ref string) (reference.Named, error) {
return reference.TagNameOnly(named), nil
}
// getImageInfo returns image chainID, compressed size and oci config. Note that getImageInfo
// getImageInfo returns image chainID, compressed size, oci config, imageID. Note that getImageInfo
// assumes that the image has been pulled or it will return an error.
func (c *criContainerdService) getImageInfo(ctx context.Context, ref string) (
imagedigest.Digest, int64, *imagespec.ImageConfig, error) {
imagedigest.Digest, int64, *imagespec.ImageConfig, imagedigest.Digest, error) {
// Get image config
normalized, err := normalizeImageRef(ref)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to normalize image reference %q: %v", ref, err)
return "", 0, nil, "", fmt.Errorf("failed to normalize image reference %q: %v", ref, err)
}
normalizedRef := normalized.String()
//TODO(Abhi): Switch to using containerd client GetImage() api
image, err := c.imageStoreService.Get(ctx, normalizedRef)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image %q from containerd image store: %v",
return "", 0, nil, "", fmt.Errorf("failed to get image %q from containerd image store: %v",
normalizedRef, err)
}
// Get image config
desc, err := image.Config(ctx, c.contentStoreService)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image config descriptor: %v", err)
return "", 0, nil, "", fmt.Errorf("failed to get image config descriptor: %v", err)
}
rc, err := c.contentStoreService.Reader(ctx, desc.Digest)
rb, err := content.ReadBlob(ctx, c.contentStoreService, desc.Digest)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image config reader: %v", err)
return "", 0, nil, "", fmt.Errorf("failed to get image config reader: %v", err)
}
defer rc.Close()
var imageConfig imagespec.Image
if err = json.NewDecoder(rc).Decode(&imageConfig); err != nil {
return "", 0, nil, fmt.Errorf("failed to decode image config: %v", err)
if err = json.Unmarshal(rb, &imageConfig); err != nil {
return "", 0, nil, "", err
}
// Get image chainID
diffIDs, err := image.RootFS(ctx, c.contentStoreService)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image diff ids: %v", err)
return "", 0, nil, "", fmt.Errorf("failed to get image diff ids: %v", err)
}
chainID := identity.ChainID(diffIDs)
// Get image size
size, err := image.Size(ctx, c.contentStoreService)
if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image size: %v", err)
return "", 0, nil, "", fmt.Errorf("failed to get image size: %v", err)
}
return chainID, size, &imageConfig.Config, nil
return chainID, size, &imageConfig.Config, desc.Digest, nil
}
// getRepoDigestAngTag returns image repoDigest and repoTag of the named image reference.
@ -336,6 +310,7 @@ func (c *criContainerdService) localResolve(ctx context.Context, ref string) (*i
if err != nil {
return nil, fmt.Errorf("invalid image reference %q: %v", ref, err)
}
//TODO(Abhi): Switch to using containerd client GetImage() api
imageInContainerd, err := c.imageStoreService.Get(ctx, normalized.String())
if err != nil {
if errdefs.IsNotFound(err) {
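
getImageInfo now reads the config blob in one call with content.ReadBlob instead of opening a reader and streaming it through a JSON decoder, and it additionally returns the config digest as the image ID. A sketch of the blob-read step, with the provider and digest assumed to come from image.Config as above:

package server

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
)

func readImageConfig(ctx context.Context, provider content.Provider, dgst digest.Digest) (*imagespec.Image, error) {
	// Read the whole config blob from the content store in one call.
	blob, err := content.ReadBlob(ctx, provider, dgst)
	if err != nil {
		return nil, fmt.Errorf("failed to read image config blob: %v", err)
	}
	var config imagespec.Image
	if err := json.Unmarshal(blob, &config); err != nil {
		return nil, fmt.Errorf("failed to unmarshal image config: %v", err)
	}
	return &config, nil
}
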

View File

@ -17,22 +17,15 @@ limitations under the License.
package server
import (
gocontext "context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
containerdimages "github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
containerdrootfs "github.com/containerd/containerd/rootfs"
"github.com/golang/glog"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
@ -88,70 +81,70 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
r.GetImage().GetImage(), retRes.GetImageRef())
}
}()
imageRef := r.GetImage().GetImage()
namedRef, err := normalizeImageRef(imageRef)
if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q: %v", imageRef, err)
}
// TODO(random-liu): [P0] Avoid concurrent pulling/removing on the same image reference.
ref := namedRef.String()
if ref != imageRef {
glog.V(4).Infof("PullImage using normalized image ref: %q", ref)
}
resolver := docker.NewResolver(docker.ResolverOptions{
Credentials: func(string) (string, string, error) { return ParseAuth(r.GetAuth()) },
Client: http.DefaultClient,
})
// TODO(mikebrow): add truncIndex for image id
imageID, repoTag, repoDigest, err := c.pullImage(ctx, imageRef, r.GetAuth())
image, err := c.client.Pull(ctx, ref, containerd.WithPullUnpack, containerd.WithSchema1Conversion, containerd.WithResolver(resolver))
if err != nil {
return nil, fmt.Errorf("failed to pull image %q: %v", imageRef, err)
return nil, fmt.Errorf("failed to pull image %q: %v", ref, err)
}
repoDigest, repoTag := getRepoDigestAndTag(namedRef, image.Target().Digest, image.Target().MediaType == containerdimages.MediaTypeDockerSchema1Manifest)
for _, r := range []string{repoTag, repoDigest} {
if r == "" {
continue
}
if err := c.createImageReference(ctx, r, image.Target()); err != nil {
return nil, fmt.Errorf("failed to update image reference %q: %v", r, err)
}
}
glog.V(4).Infof("Pulled image %q with image id %q, repo tag %q, repo digest %q", imageRef, imageID,
repoTag, repoDigest)
// Get image information.
chainID, size, config, err := c.getImageInfo(ctx, imageRef)
chainID, size, config, id, err := c.getImageInfo(ctx, imageRef)
if err != nil {
return nil, fmt.Errorf("failed to get image %q information: %v", imageRef, err)
}
image := imagestore.Image{
imageID := id.String()
if err := c.createImageReference(ctx, imageID, image.Target()); err != nil {
return nil, fmt.Errorf("failed to update image reference %q: %v", imageID, err)
}
glog.V(4).Infof("Pulled image %q with image id %q, repo tag %q, repo digest %q", imageRef, imageID,
repoTag, repoDigest)
img := imagestore.Image{
ID: imageID,
ChainID: chainID.String(),
Size: size,
Config: config,
Image: image,
}
if repoDigest != "" {
image.RepoDigests = []string{repoDigest}
img.RepoDigests = []string{repoDigest}
}
if repoTag != "" {
image.RepoTags = []string{repoTag}
img.RepoTags = []string{repoTag}
}
c.imageStore.Add(image)
c.imageStore.Add(img)
// NOTE(random-liu): the actual state in containerd is the source of truth, even we maintain
// in-memory image store, it's only for in-memory indexing. The image could be removed
// by someone else anytime, before/during/after we create the metadata. We should always
// check the actual state in containerd before using the image or returning status of the
// image.
return &runtime.PullImageResponse{ImageRef: imageID}, err
}
// resourceSet is the helper struct to help tracking all resources associated
// with an image.
type resourceSet struct {
sync.Mutex
resources map[string]struct{}
}
func newResourceSet() *resourceSet {
return &resourceSet{resources: make(map[string]struct{})}
}
func (r *resourceSet) add(resource string) {
r.Lock()
defer r.Unlock()
r.resources[resource] = struct{}{}
}
// all returns an array of all resources added.
func (r *resourceSet) all() map[string]struct{} {
r.Lock()
defer r.Unlock()
resources := make(map[string]struct{})
for resource := range r.resources {
resources[resource] = struct{}{}
}
return resources
return &runtime.PullImageResponse{ImageRef: img.ID}, err
}
// ParseAuth parses AuthConfig and returns username and password/secret required by containerd.
@ -183,160 +176,6 @@ func ParseAuth(auth *runtime.AuthConfig) (string, string, error) {
return "", "", fmt.Errorf("invalid auth config")
}
// pullImage pulls image and returns image id (config digest), repoTag and repoDigest.
func (c *criContainerdService) pullImage(ctx context.Context, rawRef string, auth *runtime.AuthConfig) (
// TODO(random-liu): Replace with client.Pull.
string, string, string, error) {
namedRef, err := normalizeImageRef(rawRef)
if err != nil {
return "", "", "", fmt.Errorf("failed to parse image reference %q: %v", rawRef, err)
}
// TODO(random-liu): [P0] Avoid concurrent pulling/removing on the same image reference.
ref := namedRef.String()
if ref != rawRef {
glog.V(4).Infof("PullImage using normalized image ref: %q", ref)
}
// Resolve the image reference to get descriptor and fetcher.
resolver := docker.NewResolver(docker.ResolverOptions{
Credentials: func(string) (string, string, error) { return ParseAuth(auth) },
Client: http.DefaultClient,
})
_, desc, err := resolver.Resolve(ctx, ref)
if err != nil {
return "", "", "", fmt.Errorf("failed to resolve ref %q: %v", ref, err)
}
fetcher, err := resolver.Fetcher(ctx, ref)
if err != nil {
return "", "", "", fmt.Errorf("failed to get fetcher for ref %q: %v", ref, err)
}
// Currently, the resolved image name is the same with ref in docker resolver,
// but they may be different in the future.
// TODO(random-liu): Always resolve image reference and use resolved image name in
// the system.
glog.V(4).Infof("Start downloading resources for image %q", ref)
resources := newResourceSet()
resourceTrackHandler := containerdimages.HandlerFunc(func(ctx gocontext.Context, desc imagespec.Descriptor) (
[]imagespec.Descriptor, error) {
resources.add(remotes.MakeRefKey(ctx, desc))
return nil, nil
})
// Fetch all image resources into content store.
// Dispatch a handler which will run a sequence of handlers to:
// 1) track all resources associated using a customized handler;
// 2) fetch the object using a FetchHandler;
// 3) recurse through any sub-layers via a ChildrenHandler.
// Support schema1 image.
var (
schema1Converter *schema1.Converter
handler containerdimages.Handler
)
if desc.MediaType == containerdimages.MediaTypeDockerSchema1Manifest {
schema1Converter = schema1.NewConverter(c.contentStoreService, fetcher)
handler = containerdimages.Handlers(
resourceTrackHandler,
schema1Converter,
)
} else {
handler = containerdimages.Handlers(
resourceTrackHandler,
remotes.FetchHandler(c.contentStoreService, fetcher),
containerdimages.ChildrenHandler(c.contentStoreService),
)
}
if err := containerdimages.Dispatch(ctx, handler, desc); err != nil {
// Dispatch returns error when requested resources are locked.
// In that case, we should start waiting and checking the pulling
// progress.
// TODO(random-liu): Check specific resource locked error type.
glog.V(5).Infof("Dispatch for %q returns error: %v", ref, err)
}
// Wait for the image pulling to finish
if err := c.waitForResourcesDownloading(ctx, resources.all()); err != nil {
return "", "", "", fmt.Errorf("failed to wait for image %q downloading: %v", ref, err)
}
glog.V(4).Infof("Finished downloading resources for image %q", ref)
if schema1Converter != nil {
desc, err = schema1Converter.Convert(ctx)
if err != nil {
return "", "", "", fmt.Errorf("failed to convert schema 1 image %q: %v", ref, err)
}
}
// In the future, containerd will rely on the information in the image store to perform image
// garbage collection.
// For now, we simply use it to store and retrieve information required for pulling an image.
// @stevvooe said we should `Put` before downloading content, However:
// 1) Containerd client put image metadata after downloading;
// 2) We need desc returned by schema1 converter.
// So just put the image metadata after downloading now.
// TODO(random-liu): Fix the potential garbage collection race.
repoDigest, repoTag := getRepoDigestAndTag(namedRef, desc.Digest, schema1Converter != nil)
if ref != repoTag && ref != repoDigest {
return "", "", "", fmt.Errorf("unexpected repo tag %q and repo digest %q for %q", repoTag, repoDigest, ref)
}
for _, r := range []string{repoTag, repoDigest} {
if r == "" {
continue
}
if err := c.createImageReference(ctx, r, desc); err != nil {
return "", "", "", fmt.Errorf("failed to update image reference %q: %v", r, err)
}
}
// Do not cleanup if following operations fail so as to make resumable download possible.
// TODO(random-liu): Replace with image.Unpack.
// Unpack the image layers into snapshots.
image, err := c.imageStoreService.Get(ctx, ref)
if err != nil {
return "", "", "", fmt.Errorf("failed to get image %q from containerd image store: %v", ref, err)
}
// Read the image manifest from content store.
manifestDigest := image.Target.Digest
p, err := content.ReadBlob(ctx, c.contentStoreService, manifestDigest)
if err != nil {
return "", "", "", fmt.Errorf("readblob failed for manifest digest %q: %v", manifestDigest, err)
}
var manifest imagespec.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return "", "", "", fmt.Errorf("unmarshal blob to manifest failed for manifest digest %q: %v",
manifestDigest, err)
}
diffIDs, err := image.RootFS(ctx, c.contentStoreService)
if err != nil {
return "", "", "", fmt.Errorf("failed to get image rootfs: %v", err)
}
if len(diffIDs) != len(manifest.Layers) {
return "", "", "", fmt.Errorf("mismatched image rootfs and manifest layers")
}
layers := make([]containerdrootfs.Layer, len(diffIDs))
for i := range diffIDs {
layers[i].Diff = imagespec.Descriptor{
// TODO: derive media type from compressed type
MediaType: imagespec.MediaTypeImageLayer,
Digest: diffIDs[i],
}
layers[i].Blob = manifest.Layers[i]
}
if _, err := containerdrootfs.ApplyLayers(ctx, layers, c.snapshotService, c.diffService); err != nil {
return "", "", "", fmt.Errorf("failed to apply layers %+v: %v", layers, err)
}
// TODO(random-liu): Considering how to deal with the disk usage of content.
configDesc, err := image.Config(ctx, c.contentStoreService)
if err != nil {
return "", "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err)
}
// Use config digest as imageID to conform to oci image spec, and also add image id as
// image reference.
imageID := configDesc.Digest.String()
if err := c.createImageReference(ctx, imageID, desc); err != nil {
return "", "", "", fmt.Errorf("failed to update image id %q: %v", imageID, err)
}
return imageID, repoTag, repoDigest, nil
}
// createImageReference creates image reference inside containerd image store.
// Note that because create and update are not finished in one transaction, there could be race. E.g.
// the image reference is deleted by someone else after create returns already exists, but before update
@ -358,40 +197,3 @@ func (c *criContainerdService) createImageReference(ctx context.Context, name st
_, err = c.imageStoreService.Update(ctx, img, "target")
return err
}
// waitDownloadingPollInterval is the interval to check resource downloading progress.
const waitDownloadingPollInterval = 200 * time.Millisecond
// waitForResourcesDownloading waits for all resource downloading to finish.
func (c *criContainerdService) waitForResourcesDownloading(ctx context.Context, resources map[string]struct{}) error {
ticker := time.NewTicker(waitDownloadingPollInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// TODO(random-liu): Use better regexp when containerd `MakeRefKey` contains more
// information.
statuses, err := c.contentStoreService.ListStatuses(ctx, "")
if err != nil {
return fmt.Errorf("failed to get content status: %v", err)
}
pulling := false
// TODO(random-liu): Move Dispatch into a separate goroutine, so that we could report
// image pulling progress concurrently.
for _, status := range statuses {
_, ok := resources[status.Ref]
if ok {
glog.V(5).Infof("Pulling resource %q with progress %d/%d",
status.Ref, status.Offset, status.Total)
pulling = true
}
}
if !pulling {
return nil
}
case <-ctx.Done():
// TODO(random-liu): Abort ongoing pulling if cancelled.
return fmt.Errorf("image resources pulling is cancelled")
}
}
}
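
PullImage now delegates the whole fetch/convert/unpack pipeline deleted above to a single client.Pull call, keeping only the resolver setup and the reference bookkeeping. A sketch of that call, with the credential callback standing in for ParseAuth:

package server

import (
	"context"
	"fmt"
	"net/http"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/remotes/docker"
)

func pullImage(ctx context.Context, client *containerd.Client, ref, user, secret string) (containerd.Image, error) {
	resolver := docker.NewResolver(docker.ResolverOptions{
		// The credential callback would normally come from ParseAuth.
		Credentials: func(string) (string, string, error) { return user, secret, nil },
		Client:      http.DefaultClient,
	})
	image, err := client.Pull(ctx, ref,
		containerd.WithPullUnpack,        // unpack layers into snapshots after fetching
		containerd.WithSchema1Conversion, // accept legacy Docker schema 1 manifests
		containerd.WithResolver(resolver))
	if err != nil {
		return nil, fmt.Errorf("failed to pull image %q: %v", ref, err)
	}
	return image, nil
}
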

View File

@ -18,33 +18,12 @@ package server
import (
"encoding/base64"
"fmt"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
func TestResources(t *testing.T) {
const threads = 10
var wg sync.WaitGroup
r := newResourceSet()
for i := 0; i < threads; i++ {
wg.Add(1)
go func(ref string) {
r.add(ref)
wg.Done()
}(fmt.Sprintf("sha256:%d", i))
}
wg.Wait()
refs := r.all()
for i := 0; i < threads; i++ {
_, ok := refs[fmt.Sprintf("sha256:%d", i)]
assert.True(t, ok)
}
}
func TestParseAuth(t *testing.T) {
testUser := "username"
testPasswd := "password"

View File

@ -19,12 +19,10 @@ package server
import (
"fmt"
tasks "github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types/task"
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types/task"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
sandboxstore "github.com/kubernetes-incubator/cri-containerd/pkg/store/sandbox"
@ -42,16 +40,15 @@ func (c *criContainerdService) ListPodSandbox(ctx context.Context, r *runtime.Li
// List all sandboxes from store.
sandboxesInStore := c.sandboxStore.List()
resp, err := c.taskService.List(ctx, &tasks.ListTasksRequest{})
response, err := c.taskService.List(ctx, &tasks.ListTasksRequest{})
if err != nil {
return nil, fmt.Errorf("failed to list sandbox containers: %v", err)
}
sandboxesInContainerd := resp.Tasks
var sandboxes []*runtime.PodSandbox
for _, sandboxInStore := range sandboxesInStore {
var sandboxInContainerd *task.Task
for _, s := range sandboxesInContainerd {
var sandboxInContainerd *task.Process
for _, s := range response.Tasks {
if s.ID == sandboxInStore.ID {
sandboxInContainerd = s
break

View File

@ -19,7 +19,7 @@ package server
import (
"fmt"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/golang/glog"
"golang.org/x/net/context"
@ -54,22 +54,14 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
// Return error if sandbox container is not fully stopped.
// TODO(random-liu): [P0] Make sure network is torn down, may need to introduce a state.
_, err = c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
if err != nil && !isContainerdGRPCNotFoundError(err) {
_, err = sandbox.Container.Task(ctx, nil)
if err != nil && !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
}
if err == nil {
return nil, fmt.Errorf("sandbox container %q is not fully stopped", id)
}
// Remove sandbox container snapshot.
if err := c.snapshotService.Remove(ctx, id); err != nil {
if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to remove sandbox container snapshot %q: %v", id, err)
}
glog.V(5).Infof("Remove called for snapshot %q that does not exist", id)
}
// Remove all containers inside the sandbox.
// NOTE(random-liu): container could still be created after this point, Kubelet should
// not rely on this behavior.
@ -96,8 +88,8 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
}
// Delete sandbox container.
if err := c.containerService.Delete(ctx, id); err != nil {
if !isContainerdGRPCNotFoundError(err) {
if err := sandbox.Container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
if !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete sandbox container %q: %v", id, err)
}
glog.V(5).Infof("Remove called for sandbox container %q that does not exist", id, err)

View File

@ -17,17 +17,14 @@ limitations under the License.
package server
import (
"encoding/json"
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/containers"
prototypes "github.com/gogo/protobuf/types"
"github.com/golang/glog"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
@ -78,26 +75,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
// Ensure sandbox container image snapshot.
image, err := c.ensureImageExists(ctx, c.sandboxImage)
if err != nil {
return nil, fmt.Errorf("failed to get sandbox image %q: %v", defaultSandboxImage, err)
}
rootfsMounts, err := c.snapshotService.View(ctx, id, image.ChainID)
if err != nil {
return nil, fmt.Errorf("failed to prepare sandbox rootfs %q: %v", image.ChainID, err)
}
defer func() {
if retErr != nil {
if err := c.snapshotService.Remove(ctx, id); err != nil {
glog.Errorf("Failed to remove sandbox container snapshot %q: %v", id, err)
}
}
}()
var rootfs []*types.Mount
for _, m := range rootfsMounts {
rootfs = append(rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
})
return nil, fmt.Errorf("failed to get sandbox image %q: %v", c.sandboxImage, err)
}
// Create sandbox container.
@ -105,34 +83,26 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
if err != nil {
return nil, fmt.Errorf("failed to generate sandbox container spec: %v", err)
}
rawSpec, err := json.Marshal(spec)
if err != nil {
return nil, fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
}
glog.V(4).Infof("Sandbox container spec: %+v", spec)
if _, err = c.containerService.Create(ctx, containers.Container{
ID: id,
// TODO(random-liu): Checkpoint metadata into container labels.
Image: image.ID,
Runtime: containers.RuntimeInfo{Name: defaultRuntime},
Spec: &prototypes.Any{
TypeUrl: runtimespec.Version,
Value: rawSpec,
},
RootFS: id,
}); err != nil {
// TODO(random-liu): Checkpoint metadata into container labels.
opts := []containerd.NewContainerOpts{
containerd.WithSpec(spec),
containerd.WithRuntime(defaultRuntime),
containerd.WithNewSnapshotView(id, image.Image)}
container, err := c.client.NewContainer(ctx, id, opts...)
if err != nil {
return nil, fmt.Errorf("failed to create containerd container: %v", err)
}
defer func() {
if retErr != nil {
if err := c.containerService.Delete(ctx, id); err != nil {
glog.Errorf("Failed to delete containerd container%q: %v", id, err)
if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
glog.Errorf("Failed to delete containerd container %q: %v", id, err)
}
}
}()
// Create sandbox container root directory.
// Prepare streaming named pipe.
sandboxRootDir := getSandboxRootDir(c.rootDir, id)
if err := c.os.MkdirAll(sandboxRootDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create sandbox root directory %q: %v",
@ -149,21 +119,18 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
}()
// Discard sandbox container output because we don't care about it.
_, stdout, stderr := getStreamingPipes(sandboxRootDir)
_, stdoutPipe, stderrPipe, err := c.prepareStreamingPipes(ctx, "", stdout, stderr)
if err != nil {
return nil, fmt.Errorf("failed to prepare streaming pipes: %v", err)
}
rStdoutPipe, wStdoutPipe := io.Pipe()
rStderrPipe, wStderrPipe := io.Pipe()
defer func() {
if retErr != nil {
stdoutPipe.Close()
stderrPipe.Close()
rStdoutPipe.Close()
rStderrPipe.Close()
}
}()
if err := c.agentFactory.NewSandboxLogger(stdoutPipe).Start(); err != nil {
if err := c.agentFactory.NewSandboxLogger(rStdoutPipe).Start(); err != nil {
return nil, fmt.Errorf("failed to start sandbox stdout logger: %v", err)
}
if err := c.agentFactory.NewSandboxLogger(stderrPipe).Start(); err != nil {
if err := c.agentFactory.NewSandboxLogger(rStderrPipe).Start(); err != nil {
return nil, fmt.Errorf("failed to start sandbox stderr logger: %v", err)
}
@ -180,32 +147,25 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
}
}()
createOpts := &tasks.CreateTaskRequest{
ContainerID: id,
Rootfs: rootfs,
// No stdin for sandbox container.
Stdout: stdout,
Stderr: stderr,
}
// Create sandbox task in containerd.
glog.V(5).Infof("Create sandbox container (id=%q, name=%q) with options %+v.",
id, name, createOpts)
createResp, err := c.taskService.Create(ctx, createOpts)
glog.V(5).Infof("Create sandbox container (id=%q, name=%q).",
id, name)
//TODO(Abhi): close the stdin or pass newIOCreation with /dev/null stdin
task, err := container.NewTask(ctx, containerd.NewIO(new(bytes.Buffer), wStdoutPipe, wStderrPipe))
if err != nil {
return nil, fmt.Errorf("failed to create sandbox container %q: %v",
id, err)
return nil, fmt.Errorf("failed to create task for sandbox %q: %v", id, err)
}
defer func() {
if retErr != nil {
// Cleanup the sandbox container if an error is returned.
if err := c.stopSandboxContainer(ctx, id); err != nil {
if _, err := task.Delete(ctx, containerd.WithProcessKill); err != nil {
glog.Errorf("Failed to delete sandbox container %q: %v", id, err)
}
}
}()
sandbox.Pid = createResp.Pid
sandbox.NetNS = getNetworkNamespace(createResp.Pid)
sandbox.Pid = task.Pid()
sandbox.NetNS = getNetworkNamespace(task.Pid())
if !config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
// Setup network for sandbox.
// TODO(random-liu): [P2] Replace with permanent network namespace.
@ -223,14 +183,14 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
}()
}
// Start sandbox container in containerd.
if _, err := c.taskService.Start(ctx, &tasks.StartTaskRequest{ContainerID: id}); err != nil {
return nil, fmt.Errorf("failed to start sandbox container %q: %v",
if err = task.Start(ctx); err != nil {
return nil, fmt.Errorf("failed to start sandbox container task %q: %v",
id, err)
}
// Add sandbox into sandbox store.
sandbox.CreatedAt = time.Now().UnixNano()
sandbox.Container = container
if err := c.sandboxStore.Add(sandbox); err != nil {
return nil, fmt.Errorf("failed to add sandbox %+v into store: %v", sandbox, err)
}
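
Both the sandbox and container paths now stream task output through in-process io.Pipe pairs instead of named pipes on disk: the write ends are handed to containerd.NewIO, the read ends to the logging agents. A sketch of that wiring, where startLogger stands in for the agentFactory logger constructors and is illustrative:

package server

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/containerd/containerd"
)

func wireTaskOutput(ctx context.Context, cntr containerd.Container,
	startLogger func(io.Reader) error) (containerd.Task, error) {
	rStdout, wStdout := io.Pipe()
	rStderr, wStderr := io.Pipe()
	// Read ends feed the log agents; write ends feed the containerd task IO.
	if err := startLogger(rStdout); err != nil {
		return nil, fmt.Errorf("failed to start stdout logger: %v", err)
	}
	if err := startLogger(rStderr); err != nil {
		return nil, fmt.Errorf("failed to start stderr logger: %v", err)
	}
	// The sandbox container gets no stdin, so an empty buffer is passed.
	return cntr.NewTask(ctx, containerd.NewIO(new(bytes.Buffer), wStdout, wStderr))
}
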

View File

@ -22,9 +22,8 @@ import (
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
sandboxstore "github.com/kubernetes-incubator/cri-containerd/pkg/store/sandbox"
@ -47,18 +46,24 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
// Use the full sandbox id.
id := sandbox.ID
info, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
if err != nil && !isContainerdGRPCNotFoundError(err) {
task, err := sandbox.Container.Task(ctx, nil)
if err != nil && !errdefs.IsNotFound(err) {
return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
}
// Set sandbox state to NOTREADY by default.
state := runtime.PodSandboxState_SANDBOX_NOTREADY
// If the sandbox container is running, treat it as READY.
if info != nil && info.Task.Status == task.StatusRunning {
state = runtime.PodSandboxState_SANDBOX_READY
}
if task != nil {
taskStatus, err := task.Status(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get task status for sandbox container %q: %v", id, err)
}
if taskStatus.Status == containerd.Running {
state = runtime.PodSandboxState_SANDBOX_READY
}
}
ip, err := c.netPlugin.GetContainerNetworkStatus(sandbox.NetNS, sandbox.Config.GetMetadata().GetNamespace(), sandbox.Config.GetMetadata().GetName(), id)
if err != nil {
// Ignore the error on network status

View File

@ -20,13 +20,10 @@ import (
"fmt"
"os"
"github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/typeurl"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/golang/glog"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
@ -83,69 +80,26 @@ func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.St
return nil, fmt.Errorf("failed to unmount sandbox files in %q: %v", sandboxRoot, err)
}
if err := c.stopSandboxContainer(ctx, id); err != nil {
if err := c.stopSandboxContainer(ctx, sandbox.Container); err != nil {
return nil, fmt.Errorf("failed to stop sandbox container %q: %v", id, err)
}
return &runtime.StopPodSandboxResponse{}, nil
}
// stopSandboxContainer kills and deletes sandbox container.
func (c *criContainerdService) stopSandboxContainer(ctx context.Context, id string) error {
cancellable, cancel := context.WithCancel(ctx)
eventstream, err := c.eventService.Subscribe(cancellable, &events.SubscribeRequest{})
func (c *criContainerdService) stopSandboxContainer(ctx context.Context, container containerd.Container) error {
task, err := container.Task(ctx, nil)
if err != nil {
return fmt.Errorf("failed to get containerd event: %v", err)
}
defer cancel()
resp, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
if err != nil {
if isContainerdGRPCNotFoundError(err) {
if errdefs.IsNotFound(err) {
return nil
}
return fmt.Errorf("failed to get sandbox container: %v", err)
}
if resp.Task.Status != task.StatusStopped {
// TODO(random-liu): [P1] Handle sandbox container graceful deletion.
if _, err := c.taskService.Kill(ctx, &tasks.KillRequest{
ContainerID: id,
Signal: uint32(unix.SIGKILL),
All: true,
}); err != nil && !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
return fmt.Errorf("failed to kill sandbox container: %v", err)
}
if err := c.waitSandboxContainer(eventstream, id, resp.Task.Pid); err != nil {
return fmt.Errorf("failed to wait for pod sandbox to stop: %v", err)
}
}
// Delete the sandbox container from containerd.
_, err = c.taskService.Delete(ctx, &tasks.DeleteTaskRequest{ContainerID: id})
if err != nil && !isContainerdGRPCNotFoundError(err) {
_, err = task.Delete(ctx, containerd.WithProcessKill)
if err != nil && !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to delete sandbox container: %v", err)
}
return nil
}
// waitSandboxContainer wait sandbox container stop event.
func (c *criContainerdService) waitSandboxContainer(eventstream events.Events_SubscribeClient, id string, pid uint32) error {
for {
evt, err := eventstream.Recv()
if err != nil {
return err
}
// Continue until the event received is of type task exit.
if !typeurl.Is(evt.Event, &events.TaskExit{}) {
continue
}
any, err := typeurl.UnmarshalAny(evt.Event)
if err != nil {
return err
}
e := any.(*events.TaskExit)
if e.ContainerID == id && e.Pid == pid {
return nil
}
}
}

View File

@ -20,7 +20,6 @@ import (
"fmt"
"golang.org/x/net/context"
healthapi "google.golang.org/grpc/health/grpc_health_v1"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
@ -38,9 +37,8 @@ func (c *criContainerdService) Status(ctx context.Context, r *runtime.StatusRequ
Type: runtime.RuntimeReady,
Status: true,
}
// Use containerd grpc server healthcheck service to check its readiness.
resp, err := c.healthService.Check(ctx, &healthapi.HealthCheckRequest{})
if err != nil || resp.Status != healthapi.HealthCheckResponse_SERVING {
serving, err := c.client.IsServing(ctx)
if err != nil || !serving {
runtimeCondition.Status = false
runtimeCondition.Reason = runtimeNotReadyReason
if err != nil {
@ -49,7 +47,6 @@ func (c *criContainerdService) Status(ctx context.Context, r *runtime.StatusRequ
runtimeCondition.Message = "Containerd grpc server is not serving"
}
}
networkCondition := &runtime.RuntimeCondition{
Type: runtime.NetworkReady,
Status: true,

View File

@ -1,97 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
healthapi "google.golang.org/grpc/health/grpc_health_v1"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)
func TestStatus(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
for desc, test := range map[string]struct {
containerdCheckRes *healthapi.HealthCheckResponse
containerdCheckErr error
networkStatusErr error
expectRuntimeNotReady bool
expectNetworkNotReady bool
}{
"runtime should not be ready when containerd is not serving": {
containerdCheckRes: &healthapi.HealthCheckResponse{
Status: healthapi.HealthCheckResponse_NOT_SERVING,
},
expectRuntimeNotReady: true,
},
"runtime should not be ready when containerd healthcheck returns error": {
containerdCheckErr: errors.New("healthcheck error"),
expectRuntimeNotReady: true,
},
"network should not be ready when network plugin status returns error": {
containerdCheckRes: &healthapi.HealthCheckResponse{
Status: healthapi.HealthCheckResponse_SERVING,
},
networkStatusErr: errors.New("status error"),
expectNetworkNotReady: true,
},
"runtime should be ready when containerd is serving": {
containerdCheckRes: &healthapi.HealthCheckResponse{
Status: healthapi.HealthCheckResponse_SERVING,
},
},
} {
t.Logf("TestCase %q", desc)
c := newTestCRIContainerdService()
ctx := context.Background()
mock := servertesting.NewMockHealthClient(ctrl)
mock.EXPECT().Check(ctx, &healthapi.HealthCheckRequest{}).Return(
test.containerdCheckRes, test.containerdCheckErr)
c.healthService = mock
if test.networkStatusErr != nil {
c.netPlugin.(*servertesting.FakeCNIPlugin).InjectError(
"Status", test.networkStatusErr)
}
resp, err := c.Status(ctx, &runtime.StatusRequest{})
assert.NoError(t, err)
require.NotNil(t, resp)
runtimeCondition := resp.Status.Conditions[0]
networkCondition := resp.Status.Conditions[1]
assert.Equal(t, runtime.RuntimeReady, runtimeCondition.Type)
assert.Equal(t, test.expectRuntimeNotReady, !runtimeCondition.Status)
if test.expectRuntimeNotReady {
assert.Equal(t, runtimeNotReadyReason, runtimeCondition.Reason)
assert.NotEmpty(t, runtimeCondition.Message)
}
assert.Equal(t, runtime.NetworkReady, networkCondition.Type)
assert.Equal(t, test.expectNetworkNotReady, !networkCondition.Status)
if test.expectNetworkNotReady {
assert.Equal(t, networkNotReadyReason, networkCondition.Reason)
assert.NotEmpty(t, networkCondition.Message)
}
}
}

View File

@ -1,64 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Automatically generated by MockGen. DO NOT EDIT!
// Source: google.golang.org/grpc/health/grpc_health_v1 (interfaces: HealthClient)
package testing
import (
gomock "github.com/golang/mock/gomock"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
grpc_health_v1 "google.golang.org/grpc/health/grpc_health_v1"
)
// Mock of HealthClient interface
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *_MockHealthClientRecorder
}
// Recorder for MockHealthClient (not exported)
type _MockHealthClientRecorder struct {
mock *MockHealthClient
}
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &_MockHealthClientRecorder{mock}
return mock
}
func (_m *MockHealthClient) EXPECT() *_MockHealthClientRecorder {
return _m.recorder
}
func (_m *MockHealthClient) Check(_param0 context.Context, _param1 *grpc_health_v1.HealthCheckRequest, _param2 ...grpc.CallOption) (*grpc_health_v1.HealthCheckResponse, error) {
_s := []interface{}{_param0, _param1}
for _, _x := range _param2 {
_s = append(_s, _x)
}
ret := _m.ctrl.Call(_m, "Check", _s...)
ret0, _ := ret[0].(*grpc_health_v1.HealthCheckResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockHealthClientRecorder) Check(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
_s := append([]interface{}{arg0, arg1}, arg2...)
return _mr.mock.ctrl.RecordCall(_mr.mock, "Check", _s...)
}

View File

@ -1,65 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Automatically generated by MockGen. DO NOT EDIT!
// Source: github.com/containerd/containerd/api/services/version (interfaces: VersionClient)
package testing
import (
version "github.com/containerd/containerd/api/services/version/v1"
gomock "github.com/golang/mock/gomock"
empty "github.com/golang/protobuf/ptypes/empty"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Mock of VersionClient interface
type MockVersionClient struct {
ctrl *gomock.Controller
recorder *_MockVersionClientRecorder
}
// Recorder for MockVersionClient (not exported)
type _MockVersionClientRecorder struct {
mock *MockVersionClient
}
func NewMockVersionClient(ctrl *gomock.Controller) *MockVersionClient {
mock := &MockVersionClient{ctrl: ctrl}
mock.recorder = &_MockVersionClientRecorder{mock}
return mock
}
func (_m *MockVersionClient) EXPECT() *_MockVersionClientRecorder {
return _m.recorder
}
func (_m *MockVersionClient) Version(_param0 context.Context, _param1 *empty.Empty, _param2 ...grpc.CallOption) (*version.VersionResponse, error) {
_s := []interface{}{_param0, _param1}
for _, _x := range _param2 {
_s = append(_s, _x)
}
ret := _m.ctrl.Call(_m, "Version", _s...)
ret0, _ := ret[0].(*version.VersionResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockVersionClientRecorder) Version(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
_s := append([]interface{}{arg0, arg1}, arg2...)
return _mr.mock.ctrl.RecordCall(_mr.mock, "Version", _s...)
}

View File

@ -19,7 +19,6 @@ package server
import (
"fmt"
"github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@ -34,7 +33,7 @@ const (
// Version returns the runtime name, runtime version and runtime API version.
func (c *criContainerdService) Version(ctx context.Context, r *runtime.VersionRequest) (*runtime.VersionResponse, error) {
resp, err := c.versionService.Version(ctx, &empty.Empty{})
resp, err := c.client.Version(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get containerd version: %v", err)
}

View File

@ -1,64 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"errors"
"testing"
versionapi "github.com/containerd/containerd/api/services/version/v1"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/ptypes/empty"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
servertesting "github.com/kubernetes-incubator/cri-containerd/pkg/server/testing"
)
func TestVersion(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
for desc, test := range map[string]struct {
versionRes *versionapi.VersionResponse
versionErr error
expectErr bool
}{
"should return error if containerd version returns error": {
versionErr: errors.New("random error"),
expectErr: true,
},
"should not return error if containerd version returns successfully": {
versionRes: &versionapi.VersionResponse{Version: "1.1.1"},
expectErr: false,
},
} {
t.Logf("TestCase %q", desc)
c := newTestCRIContainerdService()
ctx := context.Background()
mock := servertesting.NewMockVersionClient(ctrl)
mock.EXPECT().Version(ctx, &empty.Empty{}).Return(test.versionRes, test.versionErr)
c.versionService = mock
v, err := c.Version(ctx, &runtime.VersionRequest{})
if test.expectErr {
assert.Equal(t, test.expectErr, err != nil)
} else {
assert.Equal(t, test.versionRes.Version, v.RuntimeVersion)
}
}
}

View File

@ -19,6 +19,8 @@ package container
import (
"sync"
"github.com/containerd/containerd"
"github.com/kubernetes-incubator/cri-containerd/pkg/store"
)
@ -29,20 +31,35 @@ type Container struct {
Metadata
// Status stores the status of the container.
Status StatusStorage
// TODO(random-liu): Add containerd container client.
// TODO(random-liu): Add stop channel to get rid of stop poll waiting.
// Containerd container
Container containerd.Container
}
// Opts sets specific information to newly created Container.
type Opts func(*Container)
// WithContainer adds the containerd Container to the internal data store.
func WithContainer(cntr containerd.Container) Opts {
return func(c *Container) {
c.Container = cntr
}
}
// NewContainer creates an internally used container type.
func NewContainer(metadata Metadata, status Status) (Container, error) {
func NewContainer(metadata Metadata, status Status, opts ...Opts) (Container, error) {
s, err := StoreStatus(metadata.ID, status)
if err != nil {
return Container{}, err
}
return Container{
c := Container{
Metadata: metadata,
Status: s,
}, nil
}
for _, o := range opts {
o(&c)
}
return c, nil
}
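For illustration, a brief hedged sketch of how these functional options compose when building a store entry (not part of the PR; it assumes the same package and an existing containerd.Container):

// newEntry shows how Opts compose. WithContainer merely sets the Container
// field, so constructing an entry without it leaves a nil containerd handle.
func newEntry(meta Metadata, status Status, cntr containerd.Container) (Container, error) {
	return NewContainer(meta, status, WithContainer(cntr))
}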
// Delete deletes checkpoint for the container.

View File

@ -19,6 +19,7 @@ package image
import (
"sync"
"github.com/containerd/containerd"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/kubernetes-incubator/cri-containerd/pkg/store"
@ -39,7 +40,8 @@ type Image struct {
Size int64
// Config is the oci image config of the image.
Config *imagespec.ImageConfig
// TODO(random-liu): Add containerd image client.
// Containerd image reference
Image containerd.Image
}
// Store stores all images.

View File

@ -19,6 +19,8 @@ package sandbox
import (
"sync"
"github.com/containerd/containerd"
"github.com/kubernetes-incubator/cri-containerd/pkg/store"
)
@ -27,7 +29,8 @@ import (
type Sandbox struct {
// Metadata is the metadata of the sandbox, it is immutable after created.
Metadata
// TODO(random-liu): Add containerd container client.
// Containerd sandbox container
Container containerd.Container
// TODO(random-liu): Add cni network namespace client.
}

View File

@ -78,7 +78,7 @@ func TestSandboxStore(t *testing.T) {
assert := assertlib.New(t)
sandboxes := map[string]Sandbox{}
for _, id := range ids {
sandboxes[id] = Sandbox{metadatas[id]}
sandboxes[id] = Sandbox{Metadata: metadatas[id]}
}
s := NewStore()

View File

@ -1,8 +1,9 @@
github.com/blang/semver v3.1.0
github.com/boltdb/bolt v1.3.0-58-ge9cf4fa
github.com/containerd/containerd 2386062ce152d6f158d22be5991fe11c7cf67535
github.com/containerd/containerd 938810e706bbcdbcb937ce63ba3e7c9ca329af64
github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/errdefs 546f045128093f82e92beadd08fa7fb4aa6cc4e0
github.com/containernetworking/cni v0.4.0
github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
@ -27,7 +28,7 @@ github.com/kubernetes-incubator/cri-o 63a218a45844fd912f482dc85f9cc149e68e0e57
github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0
github.com/Microsoft/go-winio v0.4.4
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools e29f3ca4eb806a582ee1a1864c7b0563bd64c19b

View File

@ -9,6 +9,10 @@ containerd is an industry-standard container runtime with an emphasis on simplic
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
## Getting Started
If you are interested in trying out containerd, please see our [Getting Started Guide](docs/getting-started.md).
## Features
### Client
@ -168,16 +172,10 @@ Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vn
Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
### Releases
### Releases and API Stability
containerd will be released as 1.0 when feature complete, and that version will be supported for 1 year with security and bug fixes applied and released.
The upgrade path for containerd is that 0.0.x patch releases are always backward compatible with their major and minor version.
Each minor (0.x.0) version will always be compatible with the previous minor release, i.e. 1.2.0 is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0.
There are no compatibility guarantees for upgrades that skip a minor release, i.e. 1.0.0 to 1.2.0.
There are no backwards compatibility guarantees for upgrades across major versions, i.e. 1.0.0 to 2.0.0.
Each major version will be supported for 1 year with bug fixes and security patches.
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
of containerd components.
### Development reports.

vendor/github.com/containerd/containerd/api/README.md generated vendored Normal file
View File

@ -0,0 +1,18 @@
This directory contains the GRPC API definitions for containerd.
All defined services and messages have been aggregated into `*.pb.txt`
descriptor files in this directory. Definitions present here are considered
frozen after the release.
At release time, the current `next.pb.txt` file will be moved into place to
freeze the API changes for the minor version. For example, when 1.0.0 is
released, `next.pb.txt` should be moved to `1.0.txt`. Notice that we leave off
the patch number, since the API will be completely locked down for a given
patch series.
We may find that by default, protobuf descriptors are too noisy to lock down
API changes. In that case, we may filter out certain fields in the descriptors,
possibly regenerating for old versions.
This process is similar to the [process used to ensure backwards compatibility
in Go](https://github.com/golang/go/tree/master/api).

View File

@ -30,7 +30,6 @@ import google_protobuf1 "github.com/gogo/protobuf/types"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
import google_protobuf3 "github.com/gogo/protobuf/types"
import _ "github.com/gogo/protobuf/types"
import _ "github.com/containerd/containerd/api/types"
import time "time"
@ -2508,53 +2507,52 @@ func init() {
}
var fileDescriptorContainers = []byte{
// 757 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x72, 0xd3, 0x4a,
0x10, 0x8d, 0x6c, 0x47, 0x8e, 0xdb, 0x9b, 0x5b, 0x73, 0x7d, 0x7d, 0x85, 0xa8, 0xb2, 0x8d, 0x57,
0x5e, 0x80, 0x4c, 0x0c, 0x05, 0x79, 0xac, 0xe2, 0xbc, 0x8a, 0xaa, 0x84, 0x4a, 0x0d, 0xb0, 0x81,
0x45, 0x90, 0xed, 0xb1, 0x23, 0x2c, 0x69, 0x84, 0x66, 0xec, 0x2a, 0x17, 0x0b, 0xf8, 0x04, 0xfe,
0x82, 0x5f, 0xc9, 0x92, 0x25, 0xab, 0x3c, 0xfc, 0x25, 0x94, 0x46, 0xa3, 0xc8, 0xf8, 0x51, 0xc8,
0x81, 0xec, 0xa6, 0x3d, 0x7d, 0xba, 0x8f, 0x4e, 0x9f, 0x96, 0x05, 0x47, 0x3d, 0x8b, 0x9f, 0x0d,
0x5a, 0x46, 0x9b, 0x3a, 0xf5, 0x36, 0x75, 0xb9, 0x69, 0xb9, 0xc4, 0xef, 0x4c, 0x1e, 0x4d, 0xcf,
0xaa, 0x33, 0xe2, 0x0f, 0xad, 0x36, 0x61, 0xf1, 0xef, 0xac, 0x3e, 0x5c, 0x9f, 0x88, 0x0c, 0xcf,
0xa7, 0x9c, 0xa2, 0x07, 0x31, 0xce, 0x88, 0x30, 0xc6, 0x44, 0xd6, 0x70, 0x5d, 0x2f, 0xf4, 0x68,
0x8f, 0x8a, 0xec, 0x7a, 0x70, 0x0a, 0x81, 0xfa, 0xbd, 0x1e, 0xa5, 0x3d, 0x9b, 0xd4, 0x45, 0xd4,
0x1a, 0x74, 0xeb, 0xa6, 0x3b, 0x92, 0x57, 0xf7, 0xa7, 0xaf, 0x88, 0xe3, 0xf1, 0xe8, 0xb2, 0x32,
0x7d, 0xd9, 0xb5, 0x88, 0xdd, 0x39, 0x75, 0x4c, 0xd6, 0x97, 0x19, 0xe5, 0xe9, 0x0c, 0x6e, 0x39,
0x84, 0x71, 0xd3, 0xf1, 0x64, 0xc2, 0x76, 0x22, 0x05, 0xf8, 0xc8, 0x23, 0xac, 0xde, 0x21, 0xac,
0xed, 0x5b, 0x1e, 0xa7, 0x7e, 0x08, 0xae, 0x5e, 0x66, 0x20, 0xb7, 0x1b, 0x65, 0xa2, 0x22, 0xa4,
0xac, 0x8e, 0xa6, 0x54, 0x94, 0x5a, 0xae, 0xa9, 0x8e, 0x2f, 0xca, 0xa9, 0x17, 0x7b, 0x38, 0x65,
0x75, 0xd0, 0x09, 0xa8, 0xb6, 0xd9, 0x22, 0x36, 0xd3, 0x52, 0x95, 0x74, 0x2d, 0xdf, 0xd8, 0x30,
0x7e, 0xab, 0x93, 0x71, 0x53, 0xd5, 0x38, 0x12, 0xd0, 0x7d, 0x97, 0xfb, 0x23, 0x2c, 0xeb, 0xa0,
0x02, 0xac, 0x5a, 0x8e, 0xd9, 0x23, 0x5a, 0x3a, 0x68, 0x86, 0xc3, 0x00, 0xbd, 0x84, 0xac, 0x3f,
0x70, 0x83, 0x07, 0xd4, 0x32, 0x15, 0xa5, 0x96, 0x6f, 0x3c, 0x5d, 0xaa, 0x11, 0x0e, 0xb1, 0x38,
0x2a, 0x82, 0x6a, 0x90, 0x61, 0x1e, 0x69, 0x6b, 0xab, 0xa2, 0x58, 0xc1, 0x08, 0xa5, 0x34, 0x22,
0x29, 0x8d, 0x1d, 0x77, 0x84, 0x45, 0x06, 0xaa, 0x40, 0x9e, 0xb9, 0xa6, 0xc7, 0xce, 0x28, 0xe7,
0xc4, 0xd7, 0x54, 0xc1, 0x6a, 0xf2, 0x27, 0x54, 0x05, 0xd5, 0xa7, 0x94, 0x77, 0x99, 0x96, 0x15,
0xfa, 0xc0, 0xf8, 0xa2, 0xac, 0x62, 0x4a, 0xf9, 0xc1, 0x2b, 0x2c, 0x6f, 0xd0, 0x2e, 0x40, 0xdb,
0x27, 0x26, 0x27, 0x9d, 0x53, 0x93, 0x6b, 0x6b, 0xa2, 0xab, 0x3e, 0xd3, 0xf5, 0x75, 0x34, 0xc0,
0xe6, 0xda, 0xf9, 0x45, 0x79, 0xe5, 0xeb, 0x65, 0x59, 0xc1, 0x39, 0x89, 0xdb, 0xe1, 0x41, 0x91,
0x81, 0xd7, 0x89, 0x8a, 0xe4, 0x96, 0x29, 0x22, 0x71, 0x3b, 0x5c, 0xdf, 0x84, 0xfc, 0x84, 0xec,
0xe8, 0x1f, 0x48, 0xf7, 0xc9, 0x28, 0x9c, 0x2c, 0x0e, 0x8e, 0xc1, 0x00, 0x86, 0xa6, 0x3d, 0x20,
0x5a, 0x2a, 0x1c, 0x80, 0x08, 0xb6, 0x52, 0x1b, 0x8a, 0x7e, 0x0c, 0x59, 0x29, 0x24, 0x42, 0x90,
0x71, 0x4d, 0x87, 0x48, 0x9c, 0x38, 0x23, 0x03, 0xb2, 0xd4, 0xe3, 0x16, 0x75, 0x99, 0x80, 0x2e,
0x92, 0x35, 0x4a, 0xaa, 0x3e, 0x82, 0x7f, 0x0f, 0x09, 0xbf, 0x19, 0x12, 0x26, 0x1f, 0x07, 0x84,
0xf1, 0x45, 0x56, 0xab, 0x9e, 0x41, 0xe1, 0xd7, 0x74, 0xe6, 0x51, 0x97, 0x11, 0x74, 0x02, 0xb9,
0x9b, 0xb1, 0x0b, 0x58, 0xbe, 0xf1, 0x70, 0x19, 0x73, 0x34, 0x33, 0x81, 0x4c, 0x38, 0x2e, 0x52,
0x5d, 0x87, 0xff, 0x8e, 0x2c, 0x16, 0xb7, 0x62, 0x11, 0x35, 0x0d, 0xb2, 0x5d, 0xcb, 0xe6, 0xc4,
0x67, 0x9a, 0x52, 0x49, 0xd7, 0x72, 0x38, 0x0a, 0xab, 0x36, 0x14, 0xa7, 0x21, 0x92, 0x1e, 0x06,
0x88, 0x1b, 0x0b, 0xd8, 0xed, 0xf8, 0x4d, 0x54, 0xa9, 0x7e, 0x80, 0xe2, 0xae, 0x70, 0xc5, 0x8c,
0x78, 0x7f, 0x5f, 0x8c, 0x3e, 0xfc, 0x3f, 0xd3, 0xeb, 0xce, 0x94, 0xff, 0xa6, 0x40, 0xf1, 0x8d,
0xb0, 0xea, 0xdd, 0x3f, 0x19, 0xda, 0x86, 0x7c, 0xb8, 0x16, 0xe2, 0xa5, 0x2a, 0x3d, 0x3b, 0xbb,
0x4f, 0x07, 0xc1, 0x7b, 0xf7, 0xd8, 0x64, 0x7d, 0x2c, 0xb7, 0x2f, 0x38, 0x07, 0xb2, 0xcc, 0x10,
0xbd, 0x33, 0x59, 0x1e, 0x43, 0x71, 0x8f, 0xd8, 0x64, 0x8e, 0x2a, 0x0b, 0x96, 0xa5, 0x71, 0x95,
0x01, 0x88, 0xcd, 0x88, 0x86, 0x90, 0x3e, 0x24, 0x1c, 0x3d, 0x4b, 0x40, 0x63, 0xce, 0x4a, 0xea,
0xcf, 0x97, 0xc6, 0x49, 0x29, 0x3e, 0x41, 0x26, 0x58, 0x0b, 0x94, 0xe4, 0x6f, 0x61, 0xee, 0xca,
0xe9, 0x9b, 0xb7, 0x40, 0xca, 0xe6, 0x9f, 0x41, 0x0d, 0x9d, 0x8b, 0x92, 0x14, 0x99, 0xbf, 0x50,
0xfa, 0xd6, 0x6d, 0xa0, 0x31, 0x81, 0xd0, 0x23, 0x89, 0x08, 0xcc, 0xf7, 0x7d, 0x22, 0x02, 0x8b,
0x9c, 0xf8, 0x0e, 0xd4, 0xd0, 0x37, 0x89, 0x08, 0xcc, 0xb7, 0x98, 0x5e, 0x9c, 0xd9, 0x88, 0xfd,
0xe0, 0x33, 0xa5, 0xf9, 0xfe, 0xfc, 0xba, 0xb4, 0xf2, 0xe3, 0xba, 0xb4, 0xf2, 0x65, 0x5c, 0x52,
0xce, 0xc7, 0x25, 0xe5, 0xfb, 0xb8, 0xa4, 0x5c, 0x8d, 0x4b, 0xca, 0xdb, 0x83, 0x3f, 0xf8, 0xf2,
0xda, 0x8e, 0xa3, 0x96, 0x2a, 0x3a, 0x3e, 0xf9, 0x19, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xaf, 0xe2,
0x52, 0xca, 0x09, 0x00, 0x00,
// 738 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x92, 0xd2, 0x40,
0x14, 0x9d, 0x40, 0x26, 0x0c, 0x97, 0x8d, 0xd5, 0x22, 0xc6, 0x58, 0x05, 0x98, 0x15, 0x0b, 0x0d,
0x0e, 0x5a, 0x3a, 0x8f, 0xd5, 0x30, 0xaf, 0xb2, 0x6a, 0xc6, 0x9a, 0x6a, 0x75, 0xa3, 0x8b, 0x31,
0x40, 0xc3, 0x44, 0x92, 0x74, 0x4c, 0x37, 0x54, 0x51, 0x2e, 0xf4, 0x13, 0xfc, 0x0b, 0x7f, 0x65,
0x96, 0x2e, 0x5d, 0xcd, 0x83, 0x2f, 0xb1, 0xd2, 0x49, 0x26, 0xc8, 0xa3, 0x84, 0x51, 0x76, 0x7d,
0xe9, 0x7b, 0xee, 0x3d, 0x39, 0xf7, 0xdc, 0x10, 0x38, 0xea, 0x58, 0xfc, 0xac, 0xd7, 0x30, 0x9a,
0xd4, 0xa9, 0x36, 0xa9, 0xcb, 0x4d, 0xcb, 0x25, 0x7e, 0x6b, 0xf4, 0x68, 0x7a, 0x56, 0x95, 0x11,
0xbf, 0x6f, 0x35, 0x09, 0x4b, 0x7e, 0x67, 0xd5, 0xfe, 0xfa, 0x48, 0x64, 0x78, 0x3e, 0xe5, 0x14,
0x3d, 0x4a, 0x70, 0x46, 0x8c, 0x31, 0x46, 0xb2, 0xfa, 0xeb, 0x5a, 0xbe, 0x43, 0x3b, 0x54, 0x64,
0x57, 0x83, 0x53, 0x08, 0xd4, 0x1e, 0x74, 0x28, 0xed, 0xd8, 0xa4, 0x2a, 0xa2, 0x46, 0xaf, 0x5d,
0x35, 0xdd, 0x41, 0x74, 0xf5, 0x70, 0xfc, 0x8a, 0x38, 0x1e, 0x8f, 0x2f, 0xcb, 0xe3, 0x97, 0x6d,
0x8b, 0xd8, 0xad, 0x53, 0xc7, 0x64, 0xdd, 0x28, 0xa3, 0x34, 0x9e, 0xc1, 0x2d, 0x87, 0x30, 0x6e,
0x3a, 0x5e, 0x98, 0xa0, 0x5f, 0xca, 0x90, 0xdd, 0x8d, 0x29, 0xa2, 0x02, 0xa4, 0xac, 0x96, 0x2a,
0x95, 0xa5, 0x4a, 0xb6, 0xae, 0x0c, 0x2f, 0x4a, 0xa9, 0x57, 0x7b, 0x38, 0x65, 0xb5, 0xd0, 0x09,
0x28, 0xb6, 0xd9, 0x20, 0x36, 0x53, 0x53, 0xe5, 0x74, 0x25, 0x57, 0xdb, 0x30, 0xfe, 0xfa, 0xa8,
0xc6, 0x4d, 0x55, 0xe3, 0x48, 0x40, 0xf7, 0x5d, 0xee, 0x0f, 0x70, 0x54, 0x07, 0xe5, 0x61, 0xd5,
0x72, 0xcc, 0x0e, 0x51, 0xd3, 0x41, 0x33, 0x1c, 0x06, 0xe8, 0x35, 0x64, 0xfc, 0x9e, 0x1b, 0x70,
0x54, 0xe5, 0xb2, 0x54, 0xc9, 0xd5, 0x9e, 0x2f, 0xd4, 0x08, 0x87, 0x58, 0x1c, 0x17, 0x41, 0x15,
0x90, 0x99, 0x47, 0x9a, 0xea, 0xaa, 0x28, 0x96, 0x37, 0x42, 0x35, 0x8c, 0x58, 0x0d, 0x63, 0xc7,
0x1d, 0x60, 0x91, 0x81, 0xca, 0x90, 0x63, 0xae, 0xe9, 0xb1, 0x33, 0xca, 0x39, 0xf1, 0x55, 0x45,
0xb0, 0x1a, 0xfd, 0x09, 0xe9, 0xa0, 0xf8, 0x94, 0xf2, 0x36, 0x53, 0x33, 0x42, 0x1f, 0x18, 0x5e,
0x94, 0x14, 0x4c, 0x29, 0x3f, 0x78, 0x83, 0xa3, 0x1b, 0xb4, 0x0b, 0xd0, 0xf4, 0x89, 0xc9, 0x49,
0xeb, 0xd4, 0xe4, 0xea, 0x9a, 0xe8, 0xaa, 0x4d, 0x74, 0x7d, 0x1b, 0xcf, 0xa0, 0xbe, 0x76, 0x7e,
0x51, 0x5a, 0xf9, 0x7e, 0x59, 0x92, 0x70, 0x36, 0xc2, 0xed, 0xf0, 0xa0, 0x48, 0xcf, 0x6b, 0xc5,
0x45, 0xb2, 0x8b, 0x14, 0x89, 0x70, 0x3b, 0x5c, 0xdb, 0x84, 0xdc, 0x88, 0xec, 0xe8, 0x0e, 0xa4,
0xbb, 0x64, 0x10, 0x4e, 0x16, 0x07, 0xc7, 0x60, 0x00, 0x7d, 0xd3, 0xee, 0x11, 0x35, 0x15, 0x0e,
0x40, 0x04, 0x5b, 0xa9, 0x0d, 0x49, 0x3b, 0x86, 0x4c, 0x24, 0x24, 0x42, 0x20, 0xbb, 0xa6, 0x43,
0x22, 0x9c, 0x38, 0x23, 0x03, 0x32, 0xd4, 0xe3, 0x16, 0x75, 0x99, 0x80, 0xce, 0x92, 0x35, 0x4e,
0xd2, 0x9f, 0xc0, 0xdd, 0x43, 0xc2, 0x6f, 0x86, 0x84, 0xc9, 0xe7, 0x1e, 0x61, 0x7c, 0x96, 0xd5,
0xf4, 0x33, 0xc8, 0xff, 0x99, 0xce, 0x3c, 0xea, 0x32, 0x82, 0x4e, 0x20, 0x7b, 0x33, 0x76, 0x01,
0xcb, 0xd5, 0x1e, 0x2f, 0x62, 0x8e, 0xba, 0x1c, 0xc8, 0x84, 0x93, 0x22, 0xfa, 0x3a, 0xdc, 0x3b,
0xb2, 0x58, 0xd2, 0x8a, 0xc5, 0xd4, 0x54, 0xc8, 0xb4, 0x2d, 0x9b, 0x13, 0x9f, 0xa9, 0x52, 0x39,
0x5d, 0xc9, 0xe2, 0x38, 0xd4, 0x6d, 0x28, 0x8c, 0x43, 0x22, 0x7a, 0x18, 0x20, 0x69, 0x2c, 0x60,
0xb7, 0xe3, 0x37, 0x52, 0x45, 0xff, 0x04, 0x85, 0x5d, 0xe1, 0x8a, 0x09, 0xf1, 0xfe, 0xbf, 0x18,
0x5d, 0xb8, 0x3f, 0xd1, 0x6b, 0x69, 0xca, 0xff, 0x90, 0xa0, 0xf0, 0x4e, 0x58, 0x75, 0xf9, 0x4f,
0x86, 0xb6, 0x21, 0x17, 0xae, 0x85, 0x78, 0x2f, 0x46, 0x9e, 0x9d, 0xdc, 0xa7, 0x83, 0xe0, 0xd5,
0x79, 0x6c, 0xb2, 0x2e, 0x8e, 0xb6, 0x2f, 0x38, 0x07, 0xb2, 0x4c, 0x10, 0x5d, 0x9a, 0x2c, 0x4f,
0xa1, 0xb0, 0x47, 0x6c, 0x32, 0x45, 0x95, 0x19, 0xcb, 0x52, 0xbb, 0x92, 0x01, 0x12, 0x33, 0xa2,
0x3e, 0xa4, 0x0f, 0x09, 0x47, 0x2f, 0xe6, 0xa0, 0x31, 0x65, 0x25, 0xb5, 0x97, 0x0b, 0xe3, 0x22,
0x29, 0xbe, 0x80, 0x1c, 0xac, 0x05, 0x9a, 0xe7, 0x6f, 0x61, 0xea, 0xca, 0x69, 0x9b, 0xb7, 0x40,
0x46, 0xcd, 0xbf, 0x82, 0x12, 0x3a, 0x17, 0xcd, 0x53, 0x64, 0xfa, 0x42, 0x69, 0x5b, 0xb7, 0x81,
0x26, 0x04, 0x42, 0x8f, 0xcc, 0x45, 0x60, 0xba, 0xef, 0xe7, 0x22, 0x30, 0xcb, 0x89, 0x1f, 0x40,
0x09, 0x7d, 0x33, 0x17, 0x81, 0xe9, 0x16, 0xd3, 0x0a, 0x13, 0x1b, 0xb1, 0x1f, 0x7c, 0x69, 0xd4,
0x3f, 0x9e, 0x5f, 0x17, 0x57, 0x7e, 0x5d, 0x17, 0x57, 0xbe, 0x0d, 0x8b, 0xd2, 0xf9, 0xb0, 0x28,
0xfd, 0x1c, 0x16, 0xa5, 0xab, 0x61, 0x51, 0x7a, 0x7f, 0xf0, 0x0f, 0x1f, 0x4f, 0xdb, 0x49, 0xd4,
0x50, 0x44, 0xc7, 0x67, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xef, 0xc2, 0x41, 0x7b, 0x8d, 0x09,
0x00, 0x00,
}

View File

@ -7,7 +7,6 @@ import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";

View File

@ -20,8 +20,6 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/empty"
import _ "github.com/gogo/protobuf/types"
import containerd_types "github.com/containerd/containerd/api/types"
import containerd_types1 "github.com/containerd/containerd/api/types"
@ -1081,32 +1079,31 @@ func init() {
}
var fileDescriptorDiff = []byte{
// 427 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x31, 0x6f, 0xd4, 0x30,
0x14, 0xc7, 0x6b, 0xee, 0x5a, 0x54, 0x5f, 0x91, 0x90, 0x85, 0x44, 0x94, 0x42, 0x38, 0x65, 0x4a,
0x41, 0xd8, 0xf4, 0x90, 0x3a, 0xd0, 0xa5, 0xa0, 0x4a, 0x4c, 0x2c, 0x51, 0x27, 0x90, 0x40, 0xb9,
0xe4, 0x25, 0xb5, 0x94, 0xc4, 0x6e, 0xec, 0x9c, 0x94, 0x8d, 0xcf, 0xc1, 0xd7, 0x61, 0xe9, 0xc8,
0xc8, 0x48, 0xf3, 0x49, 0x50, 0x1c, 0x07, 0x22, 0x90, 0x8e, 0xd0, 0xc9, 0x2f, 0x7e, 0xbf, 0xff,
0x7b, 0x7f, 0xbf, 0xd8, 0xf8, 0x2c, 0xe3, 0xfa, 0xb2, 0x5e, 0xd3, 0x58, 0x14, 0x2c, 0x16, 0xa5,
0x8e, 0x78, 0x09, 0x55, 0x32, 0x0e, 0x23, 0xc9, 0x99, 0x82, 0x6a, 0xc3, 0x63, 0x50, 0x2c, 0xe1,
0x69, 0xca, 0x36, 0xc7, 0x66, 0xa5, 0xb2, 0x12, 0x5a, 0x90, 0xc3, 0xdf, 0x2c, 0x1d, 0x38, 0x6a,
0xf2, 0x9b, 0x63, 0xf7, 0x41, 0x26, 0x32, 0x61, 0x38, 0xd6, 0x45, 0xbd, 0xc4, 0x3d, 0xcc, 0x84,
0xc8, 0x72, 0x60, 0xe6, 0x6b, 0x5d, 0xa7, 0x0c, 0x0a, 0xa9, 0x1b, 0x9b, 0x7c, 0xf2, 0x67, 0x52,
0xf3, 0x02, 0x94, 0x8e, 0x0a, 0x69, 0x81, 0x93, 0x49, 0x96, 0x75, 0x23, 0x41, 0xb1, 0x42, 0xd4,
0xa5, 0xb6, 0xba, 0xd3, 0xff, 0xd0, 0x25, 0xa0, 0xe2, 0x8a, 0x4b, 0x2d, 0xaa, 0x5e, 0xec, 0x5f,
0xe1, 0x83, 0xd7, 0x52, 0xe6, 0x4d, 0x08, 0x57, 0x35, 0x28, 0x4d, 0x5e, 0xe0, 0x79, 0x77, 0x46,
0x07, 0x2d, 0x51, 0xb0, 0x58, 0x3d, 0xa2, 0xa3, 0x21, 0x98, 0x0a, 0xf4, 0xfc, 0x57, 0x85, 0xd0,
0x90, 0x84, 0xe1, 0x3d, 0xe3, 0x46, 0x39, 0x77, 0x96, 0xb3, 0x60, 0xb1, 0x7a, 0xf8, 0xb7, 0xe6,
0x5d, 0x97, 0x0f, 0x2d, 0xe6, 0xbf, 0xc5, 0xf7, 0x6c, 0x4b, 0x25, 0x45, 0xa9, 0x80, 0x9c, 0xe0,
0xbb, 0x91, 0x94, 0x39, 0x87, 0x64, 0x52, 0xdb, 0x01, 0xf6, 0xbf, 0x20, 0xbc, 0x38, 0xe7, 0x69,
0x3a, 0x78, 0x7f, 0x86, 0xe7, 0x39, 0xa4, 0xda, 0x41, 0xdb, 0x7d, 0x18, 0x88, 0x3c, 0xc7, 0xbb,
0x15, 0xcf, 0x2e, 0xf5, 0xbf, 0x5c, 0xf7, 0x14, 0x79, 0x8c, 0x71, 0x01, 0x09, 0x8f, 0x3e, 0x75,
0x39, 0x67, 0xb6, 0x44, 0xc1, 0x7e, 0xb8, 0x6f, 0x76, 0x2e, 0x1a, 0x09, 0xe4, 0x3e, 0x9e, 0x55,
0x90, 0x3a, 0x73, 0xb3, 0xdf, 0x85, 0xfe, 0x19, 0x3e, 0xe8, 0xbd, 0xd9, 0x43, 0x0e, 0x83, 0x9d,
0x4d, 0x1d, 0xec, 0xea, 0x2b, 0xc2, 0xf3, 0xae, 0x04, 0xf9, 0x88, 0x77, 0xcd, 0xc0, 0xc8, 0x11,
0xdd, 0x72, 0x27, 0xe9, 0xf8, 0x3f, 0xba, 0x4f, 0xa7, 0xa0, 0xd6, 0xda, 0x07, 0xdb, 0x27, 0xd8,
0xaa, 0x19, 0x4d, 0xda, 0x3d, 0x9a, 0x40, 0xf6, 0xc5, 0xdf, 0x5c, 0x5c, 0xdf, 0x78, 0x3b, 0xdf,
0x6f, 0xbc, 0x9d, 0xcf, 0xad, 0x87, 0xae, 0x5b, 0x0f, 0x7d, 0x6b, 0x3d, 0xf4, 0xa3, 0xf5, 0xd0,
0xfb, 0x57, 0xb7, 0x7a, 0xa2, 0xa7, 0xdd, 0xba, 0xde, 0x33, 0xb7, 0xf7, 0xe5, 0xcf, 0x00, 0x00,
0x00, 0xff, 0xff, 0x44, 0x8b, 0x75, 0x5d, 0xe7, 0x03, 0x00, 0x00,
// 401 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x8b, 0xda, 0x40,
0x14, 0xc7, 0x9d, 0x26, 0x5a, 0x1c, 0x2d, 0x94, 0xa1, 0xd0, 0x90, 0xb6, 0x41, 0x72, 0x8a, 0x2d,
0x9d, 0x54, 0x0b, 0x1e, 0xea, 0xc5, 0x16, 0xa1, 0xa7, 0x5e, 0x82, 0xa7, 0x16, 0x5a, 0x62, 0x32,
0x89, 0x03, 0x9a, 0x19, 0x33, 0xa3, 0xe0, 0xad, 0x9f, 0x63, 0xbf, 0xce, 0x5e, 0x3c, 0xee, 0x71,
0x8f, 0x6b, 0x3e, 0xc9, 0x92, 0x49, 0xb2, 0x1b, 0x58, 0x70, 0xb3, 0x7b, 0x9a, 0xc7, 0xbc, 0xdf,
0xff, 0xbd, 0xff, 0xbc, 0xbc, 0xc0, 0x59, 0x4c, 0xe5, 0x6a, 0xb7, 0xc4, 0x01, 0xdb, 0xb8, 0x01,
0x4b, 0xa4, 0x4f, 0x13, 0x92, 0x86, 0xf5, 0xd0, 0xe7, 0xd4, 0x15, 0x24, 0xdd, 0xd3, 0x80, 0x08,
0x37, 0xa4, 0x51, 0xe4, 0xee, 0x47, 0xea, 0xc4, 0x3c, 0x65, 0x92, 0xa1, 0x77, 0xf7, 0x2c, 0xae,
0x38, 0xac, 0xf2, 0xfb, 0x91, 0xf9, 0x26, 0x66, 0x31, 0x53, 0x9c, 0x9b, 0x47, 0x85, 0xc4, 0x9c,
0x34, 0x6a, 0x2a, 0x0f, 0x9c, 0x08, 0x77, 0xc3, 0x76, 0x89, 0x2c, 0x75, 0xd3, 0x27, 0xe8, 0x42,
0x22, 0x82, 0x94, 0x72, 0xc9, 0xd2, 0x42, 0x6c, 0x6f, 0x61, 0xff, 0x3b, 0xe7, 0xeb, 0x83, 0x47,
0xb6, 0x3b, 0x22, 0x24, 0xfa, 0x02, 0xf5, 0xdc, 0xa5, 0x01, 0x06, 0xc0, 0xe9, 0x8d, 0xdf, 0xe3,
0xda, 0x33, 0x54, 0x05, 0x3c, 0xbf, 0xab, 0xe0, 0x29, 0x12, 0xb9, 0xb0, 0xa3, 0xdc, 0x08, 0xe3,
0xc5, 0x40, 0x73, 0x7a, 0xe3, 0xb7, 0x0f, 0x35, 0xbf, 0xf2, 0xbc, 0x57, 0x62, 0xf6, 0x4f, 0xf8,
0xaa, 0x6c, 0x29, 0x38, 0x4b, 0x04, 0x41, 0x13, 0xf8, 0xd2, 0xe7, 0x7c, 0x4d, 0x49, 0xd8, 0xa8,
0x6d, 0x05, 0xdb, 0x17, 0x00, 0xf6, 0xe6, 0x34, 0x8a, 0x2a, 0xef, 0x9f, 0xa0, 0xbe, 0x26, 0x91,
0x34, 0xc0, 0x79, 0x1f, 0x0a, 0x42, 0x9f, 0x61, 0x3b, 0xa5, 0xf1, 0x4a, 0x3e, 0xe6, 0xba, 0xa0,
0xd0, 0x07, 0x08, 0x37, 0x24, 0xa4, 0xfe, 0xbf, 0x3c, 0x67, 0x68, 0x03, 0xe0, 0x74, 0xbd, 0xae,
0xba, 0x59, 0x1c, 0x38, 0x41, 0xaf, 0xa1, 0x96, 0x92, 0xc8, 0xd0, 0xd5, 0x7d, 0x1e, 0xda, 0x33,
0xd8, 0x2f, 0xbc, 0x95, 0x8f, 0xac, 0x06, 0xab, 0x35, 0x1d, 0xec, 0xf8, 0x12, 0x40, 0x3d, 0x2f,
0x81, 0xfe, 0xc2, 0xb6, 0x1a, 0x18, 0x1a, 0xe2, 0x33, 0x5b, 0x85, 0xeb, 0xdf, 0xd1, 0xfc, 0xd8,
0x04, 0x2d, 0xad, 0xfd, 0x29, 0xfb, 0x38, 0x67, 0x35, 0xb5, 0x49, 0x9b, 0xc3, 0x06, 0x64, 0x51,
0xfc, 0xc7, 0xe2, 0x78, 0xb2, 0x5a, 0xd7, 0x27, 0xab, 0xf5, 0x3f, 0xb3, 0xc0, 0x31, 0xb3, 0xc0,
0x55, 0x66, 0x81, 0x9b, 0xcc, 0x02, 0xbf, 0xbf, 0x3d, 0xeb, 0x27, 0x9b, 0xe6, 0xe7, 0xb2, 0xa3,
0xb6, 0xf7, 0xeb, 0x6d, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x16, 0x1d, 0x04, 0xa9, 0x03, 0x00,
0x00,
}

View File

@ -3,8 +3,6 @@ syntax = "proto3";
package containerd.services.diff.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";

View File

@ -19,8 +19,9 @@
ContainerUpdate
ContainerDelete
ContentDelete
SubscribeRequest
PublishRequest
ForwardRequest
SubscribeRequest
Envelope
ImageCreate
ImageUpdate
@ -38,6 +39,7 @@
TaskExit
TaskOOM
TaskExecAdded
TaskExecStarted
TaskPaused
TaskResumed
TaskCheckpointed
@ -49,6 +51,9 @@ import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import google_protobuf1 "github.com/gogo/protobuf/types"
import _ "github.com/containerd/containerd/protobuf/plugin"
import github_com_containerd_containerd_typeurl "github.com/containerd/containerd/typeurl"
import strings "strings"
import reflect "reflect"
@ -113,6 +118,101 @@ func init() {
proto.RegisterType((*ContainerUpdate)(nil), "containerd.services.events.v1.ContainerUpdate")
proto.RegisterType((*ContainerDelete)(nil), "containerd.services.events.v1.ContainerDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContainerCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "id":
return string(m.ID), len(m.ID) > 0
case "image":
return string(m.Image), len(m.Image) > 0
case "runtime":
// NOTE(stevvooe): This is probably not correct in many cases.
// We assume that the target message also implements the Field
// method, which isn't likely true in a lot of cases.
//
// If you have a broken build and have found this comment,
// you may be closer to a solution.
if m.Runtime == nil {
return "", false
}
return m.Runtime.Field(fieldpath[1:])
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "options":
decoded, err := github_com_containerd_containerd_typeurl.UnmarshalAny(m.Options)
if err != nil {
return "", false
}
adaptor, ok := decoded.(interface {
Field([]string) (string, bool)
})
if !ok {
return "", false
}
return adaptor.Field(fieldpath[1:])
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "id":
return string(m.ID), len(m.ID) > 0
case "image":
return string(m.Image), len(m.Image) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
case "rootfs":
return string(m.RootFS), len(m.RootFS) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContainerDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "id":
return string(m.ID), len(m.ID) > 0
}
return "", false
}
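These generated Field methods are what back fieldpath-style event filtering; a hedged sketch of resolving a path against a ContainerCreate event (illustrative only, assumed to sit in the same package as the generated code):

// lookupImage resolves the "image" path; Field returns ("", false) for
// unknown paths, and nested paths such as []string{"runtime", "name"} are
// delegated to the embedded Runtime message's own Field method.
func lookupImage(ev *ContainerCreate) (string, bool) {
	return ev.Field([]string{"image"})
}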
func (m *ContainerCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -1131,31 +1231,32 @@ func init() {
}
var fileDescriptorContainer = []byte{
// 401 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x41, 0x8b, 0xd4, 0x30,
0x18, 0xdd, 0xb4, 0x6b, 0x07, 0xd3, 0x83, 0x12, 0x06, 0xa9, 0x05, 0xbb, 0x43, 0x4f, 0xe3, 0x25,
0x65, 0x47, 0x10, 0x5d, 0x41, 0x70, 0x77, 0x55, 0x04, 0x05, 0x89, 0x08, 0xe2, 0x2d, 0x9d, 0x66,
0x6a, 0xb0, 0x4d, 0x4a, 0x9b, 0x16, 0x7a, 0xf3, 0xe7, 0xcd, 0xd1, 0xa3, 0xa7, 0x61, 0xa6, 0x3f,
0xc1, 0x5f, 0x20, 0x4d, 0x5a, 0xa7, 0x08, 0x8a, 0x7a, 0x7b, 0x5f, 0xbe, 0xf7, 0xbe, 0xef, 0xbd,
0x24, 0xf0, 0x65, 0xca, 0xd5, 0xa7, 0x3a, 0xc6, 0x6b, 0x99, 0x47, 0x6b, 0x29, 0x14, 0xe5, 0x82,
0x95, 0xc9, 0x14, 0xd2, 0x82, 0x47, 0x15, 0x2b, 0x1b, 0xbe, 0x66, 0x55, 0xc4, 0x1a, 0x26, 0x54,
0x15, 0x35, 0xe7, 0x47, 0x06, 0x2e, 0x4a, 0xa9, 0x24, 0xba, 0x77, 0x94, 0xe0, 0x91, 0x8e, 0x0d,
0x1d, 0x37, 0xe7, 0xfe, 0x3c, 0x95, 0xa9, 0xd4, 0xcc, 0xa8, 0x47, 0x46, 0xe4, 0xdf, 0x4d, 0xa5,
0x4c, 0x33, 0x16, 0xe9, 0x2a, 0xae, 0x37, 0x11, 0x15, 0xad, 0x69, 0x85, 0x7b, 0x00, 0x6f, 0x5d,
0x8d, 0x23, 0xaf, 0x4a, 0x46, 0x15, 0x43, 0x77, 0xa0, 0xc5, 0x13, 0x0f, 0x2c, 0xc0, 0xf2, 0xe6,
0xa5, 0xd3, 0xed, 0xce, 0xac, 0x57, 0xd7, 0xc4, 0xe2, 0x09, 0x9a, 0xc3, 0x1b, 0x3c, 0xa7, 0x29,
0xf3, 0xac, 0xbe, 0x45, 0x4c, 0x81, 0xde, 0xc2, 0x59, 0x59, 0x0b, 0xc5, 0x73, 0xe6, 0xd9, 0x0b,
0xb0, 0x74, 0x57, 0x0f, 0xf1, 0x1f, 0x3d, 0xe2, 0x5f, 0xd6, 0x61, 0x62, 0xd4, 0x64, 0x1c, 0xe3,
0xbf, 0x81, 0xb3, 0xe1, 0x0c, 0x21, 0x78, 0x2a, 0x68, 0xce, 0x8c, 0x19, 0xa2, 0x31, 0xc2, 0x70,
0x26, 0x0b, 0xc5, 0xa5, 0xa8, 0xb4, 0x11, 0x77, 0x35, 0xc7, 0x26, 0x1f, 0x1e, 0xf3, 0xe1, 0x67,
0xa2, 0x25, 0x23, 0x29, 0xfc, 0x3e, 0x8d, 0xf8, 0xbe, 0x48, 0xfe, 0x3d, 0x22, 0x81, 0x4e, 0x46,
0x63, 0x96, 0x55, 0x9e, 0xbd, 0xb0, 0x97, 0xee, 0xea, 0xe2, 0x6f, 0x13, 0x9a, 0x6d, 0xf8, 0xb5,
0x16, 0x3f, 0x17, 0xaa, 0x6c, 0xc9, 0x30, 0x09, 0x85, 0xd0, 0x29, 0xa5, 0x54, 0x9b, 0xca, 0x3b,
0xd5, 0x2e, 0x60, 0xb7, 0x3b, 0x73, 0x88, 0x94, 0xea, 0xc5, 0x3b, 0x32, 0x74, 0xfc, 0xc7, 0xd0,
0x9d, 0x48, 0xd1, 0x6d, 0x68, 0x7f, 0x66, 0xed, 0x70, 0x17, 0x3d, 0xec, 0xed, 0x36, 0x34, 0xab,
0x7f, 0xda, 0xd5, 0xc5, 0x85, 0xf5, 0x08, 0x84, 0xf7, 0x27, 0x99, 0xaf, 0x59, 0xc6, 0x7e, 0x9f,
0xf9, 0xf2, 0xc3, 0xf6, 0x10, 0x9c, 0x7c, 0x3b, 0x04, 0x27, 0x5f, 0xba, 0x00, 0x6c, 0xbb, 0x00,
0x7c, 0xed, 0x02, 0xb0, 0xef, 0x02, 0xf0, 0xf1, 0xe9, 0x7f, 0xfe, 0xda, 0x27, 0x06, 0xc5, 0x8e,
0x7e, 0x90, 0x07, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x68, 0xeb, 0xf5, 0x3f, 0xfe, 0x02, 0x00,
0x00,
// 429 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x4d, 0x8b, 0xd4, 0x40,
0x10, 0xdd, 0xce, 0xac, 0x19, 0xec, 0x39, 0x28, 0xcd, 0x20, 0x71, 0xc0, 0xec, 0x30, 0xa7, 0xf1,
0xd2, 0xcd, 0x8e, 0x20, 0xba, 0x82, 0xe8, 0xee, 0xaa, 0x08, 0x0a, 0xd2, 0xe2, 0x45, 0xbc, 0xf4,
0x4c, 0x6a, 0xb2, 0x8d, 0x49, 0x77, 0x48, 0x3a, 0x81, 0xdc, 0xfc, 0x29, 0xfe, 0x9c, 0x3d, 0x7a,
0xf4, 0xb4, 0xec, 0xe6, 0x27, 0x88, 0x3f, 0x40, 0xd2, 0x9d, 0xec, 0x06, 0xc1, 0xcf, 0xdb, 0xab,
0xd4, 0x7b, 0x55, 0xef, 0x55, 0x1a, 0xbf, 0x88, 0xa5, 0x39, 0x29, 0xd7, 0x74, 0xa3, 0x53, 0xb6,
0xd1, 0xca, 0x08, 0xa9, 0x20, 0x8f, 0x86, 0x50, 0x64, 0x92, 0x15, 0x90, 0x57, 0x72, 0x03, 0x05,
0x83, 0x0a, 0x94, 0x29, 0x58, 0xb5, 0x7f, 0xc5, 0xa0, 0x59, 0xae, 0x8d, 0x26, 0x77, 0xae, 0x24,
0xb4, 0xa7, 0x53, 0x47, 0xa7, 0xd5, 0xfe, 0x6c, 0x1a, 0xeb, 0x58, 0x5b, 0x26, 0x6b, 0x91, 0x13,
0xcd, 0x6e, 0xc7, 0x5a, 0xc7, 0x09, 0x30, 0x5b, 0xad, 0xcb, 0x2d, 0x13, 0xaa, 0xee, 0x5a, 0x4f,
0xfe, 0x68, 0xec, 0x52, 0x94, 0x25, 0x65, 0x2c, 0x15, 0xdb, 0x4a, 0x48, 0xa2, 0x4c, 0x98, 0x13,
0x37, 0x61, 0x71, 0x8e, 0xf0, 0x8d, 0xa3, 0x9e, 0x7e, 0x94, 0x83, 0x30, 0x40, 0x6e, 0x61, 0x4f,
0x46, 0x01, 0x9a, 0xa3, 0xe5, 0xf5, 0x43, 0xbf, 0x39, 0xdb, 0xf3, 0x5e, 0x1e, 0x73, 0x4f, 0x46,
0x64, 0x8a, 0xaf, 0xc9, 0x54, 0xc4, 0x10, 0x78, 0x6d, 0x8b, 0xbb, 0x82, 0xbc, 0xc1, 0xe3, 0xbc,
0x54, 0x46, 0xa6, 0x10, 0x8c, 0xe6, 0x68, 0x39, 0x59, 0xdd, 0xa7, 0xbf, 0x4d, 0x49, 0x7f, 0x5a,
0x47, 0xb9, 0x53, 0xf3, 0x7e, 0xcc, 0xec, 0x35, 0x1e, 0x77, 0xdf, 0x08, 0xc1, 0xbb, 0x4a, 0xa4,
0xe0, 0xcc, 0x70, 0x8b, 0x09, 0xc5, 0x63, 0x9d, 0x19, 0xa9, 0x55, 0x61, 0x8d, 0x4c, 0x56, 0x53,
0xea, 0x2e, 0x44, 0xfb, 0xb0, 0xf4, 0xa9, 0xaa, 0x79, 0x4f, 0x5a, 0x7c, 0x1b, 0x46, 0x7c, 0x97,
0x45, 0xff, 0x1e, 0x91, 0x63, 0x3f, 0x11, 0x6b, 0x48, 0x8a, 0x60, 0x34, 0x1f, 0x2d, 0x27, 0xab,
0x83, 0xbf, 0x4d, 0xe8, 0xb6, 0xd1, 0x57, 0x56, 0xfc, 0x4c, 0x99, 0xbc, 0xe6, 0xdd, 0x24, 0xb2,
0xc0, 0x7e, 0xae, 0xb5, 0xd9, 0x16, 0xc1, 0xae, 0x75, 0x81, 0x9b, 0xb3, 0x3d, 0x9f, 0x6b, 0x6d,
0x9e, 0xbf, 0xe5, 0x5d, 0x67, 0xf6, 0x10, 0x4f, 0x06, 0x52, 0x72, 0x13, 0x8f, 0x3e, 0x42, 0xdd,
0xdd, 0xa2, 0x85, 0xad, 0xdd, 0x4a, 0x24, 0xe5, 0xa5, 0x5d, 0x5b, 0x1c, 0x78, 0x0f, 0xd0, 0xe2,
0xee, 0x20, 0xf3, 0x31, 0x24, 0xf0, 0xeb, 0xcc, 0x87, 0x1f, 0x4e, 0x2f, 0xc2, 0x9d, 0xaf, 0x17,
0xe1, 0xce, 0xa7, 0x26, 0x44, 0xa7, 0x4d, 0x88, 0xbe, 0x34, 0x21, 0x3a, 0x6f, 0x42, 0xf4, 0xf9,
0x7b, 0x88, 0xde, 0x3f, 0xfe, 0xcf, 0xb7, 0xff, 0xc8, 0xa1, 0xb5, 0x6f, 0x7f, 0xca, 0xbd, 0x1f,
0x01, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xae, 0x41, 0x3c, 0x44, 0x03, 0x00, 0x00,
}

View File

@ -4,8 +4,10 @@ package containerd.services.events.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message ContainerCreate {
string id = 1;

View File

@ -8,6 +8,7 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/containerd/containerd/protobuf/plugin"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
@ -32,6 +33,20 @@ func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorConten
func init() {
proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "digest":
return string(m.Digest), len(m.Digest) > 0
}
return "", false
}
func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -313,19 +328,21 @@ func init() {
}
var fileDescriptorContent = []byte{
// 210 bytes of a gzipped FileDescriptorProto
// 242 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15,
0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95,
0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x4a, 0xd1,
0x5c, 0xbc, 0xce, 0x10, 0x53, 0x5c, 0x52, 0x73, 0x52, 0x4b, 0x52, 0x85, 0xbc, 0xb8, 0xd8, 0x52,
0x32, 0xd3, 0x53, 0x8b, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0x8c, 0x4e, 0xdc, 0x93,
0x67, 0xb8, 0x75, 0x4f, 0x5e, 0x0b, 0xc9, 0x91, 0xf9, 0x05, 0xa9, 0x79, 0x70, 0xcb, 0x8a, 0xf5,
0xd3, 0xf3, 0x75, 0x21, 0x5a, 0xf4, 0x5c, 0xc0, 0x54, 0x10, 0xd4, 0x04, 0xa7, 0x88, 0x13, 0x0f,
0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85,
0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0xd9, 0x91, 0xe9, 0x65, 0x6b, 0x08, 0x2b, 0x89,
0x0d, 0xec, 0x7a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x41, 0x46, 0x06, 0x3b, 0x01,
0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x52, 0x0e,
0x04, 0xed, 0x06, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x4f,
0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0, 0x14, 0xcd, 0xc5, 0xeb, 0x0c,
0x71, 0x87, 0x4b, 0x6a, 0x4e, 0x6a, 0x49, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a,
0x71, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee,
0xc9, 0x6b, 0x21, 0x59, 0x95, 0x5f, 0x90, 0x9a, 0x07, 0xb7, 0xa3, 0x58, 0x3f, 0x3d, 0x5f, 0x17,
0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, 0x4d, 0x70, 0x8a, 0x39, 0xf1, 0x50, 0x8e, 0xe1, 0xc6,
0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8,
0xe0, 0x91, 0x1c, 0xe3, 0x82, 0x2f, 0x72, 0x8c, 0x51, 0x76, 0x64, 0x06, 0x9c, 0x35, 0x84, 0x95,
0xc4, 0x06, 0xf6, 0x81, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x51, 0xce, 0xec, 0x89, 0x81, 0x01,
0x00, 0x00,
}

View File

@ -3,8 +3,10 @@ syntax = "proto3";
package containerd.services.events.v1;
import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message ContentDelete {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];

View File

@ -7,6 +7,7 @@ package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/containerd/containerd/protobuf/plugin"
import _ "github.com/gogo/protobuf/gogoproto"
import google_protobuf1 "github.com/gogo/protobuf/types"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
@ -14,6 +15,8 @@ import _ "github.com/gogo/protobuf/types"
import time "time"
import github_com_containerd_containerd_typeurl "github.com/containerd/containerd/typeurl"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
@ -32,21 +35,30 @@ var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
type PublishRequest struct {
Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
}
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
func (*PublishRequest) ProtoMessage() {}
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
type ForwardRequest struct {
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
}
func (m *ForwardRequest) Reset() { *m = ForwardRequest{} }
func (*ForwardRequest) ProtoMessage() {}
func (*ForwardRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
type SubscribeRequest struct {
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
}
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
func (*SubscribeRequest) ProtoMessage() {}
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
type PublishRequest struct {
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
}
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
func (*PublishRequest) ProtoMessage() {}
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
type Envelope struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
@ -57,14 +69,45 @@ type Envelope struct {
func (m *Envelope) Reset() { *m = Envelope{} }
func (*Envelope) ProtoMessage() {}
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{3} }
func init() {
proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest")
proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.v1.ForwardRequest")
proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest")
proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *Envelope) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: timestamp
case "namespace":
return string(m.Namespace), len(m.Namespace) > 0
case "topic":
return string(m.Topic), len(m.Topic) > 0
case "event":
decoded, err := github_com_containerd_containerd_typeurl.UnmarshalAny(m.Event)
if err != nil {
return "", false
}
adaptor, ok := decoded.(interface {
Field([]string) (string, bool)
})
if !ok {
return "", false
}
return adaptor.Field(fieldpath[1:])
}
return "", false
}
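A hedged sketch of what this delegation enables: because the "event" case unwraps the Any payload via typeurl and forwards the rest of the path, a nested field of the wrapped event can be read straight off an Envelope (illustrative only; it assumes the wrapped type, e.g. ContainerCreate, is registered with typeurl):

// containerIDFromEnvelope resolves {"event", "id"} against the wrapped event.
// For unregistered or mismatched payloads Field simply returns ("", false).
func containerIDFromEnvelope(env *Envelope) (string, bool) {
	return env.Field([]string{"event", "id"})
}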
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
@ -76,7 +119,24 @@ const _ = grpc.SupportPackageIsVersion4
// Client API for Events service
type EventsClient interface {
// Publish an event to a topic.
//
// The event will be packed into a timestamp envelope with the namespace
// introspected from the context. The envelope will then be dispatched.
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
// Forward sends an event that has already been packaged into an envelope
// with a timestamp and namespace.
//
// This is useful if earlier timestamping is required or when forwarding on
// behalf of another component, namespace or publisher.
Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
// Subscribe to a stream of events, possibly returning only those that match any
// of the provided filters.
//
// Unlike many other methods in containerd, subscribers will get messages
// from all namespaces unless otherwise specified. If this is not desired,
// a filter can be provided in the format 'namespace==<namespace>' to
// restrict the received events.
Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error)
}
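A hedged usage sketch of this client interface over an existing gRPC connection (not part of the PR; NewEventsClient is the standard generated constructor in this package, and connection setup plus namespace metadata are assumed to be handled by the caller):

// watchNamespace subscribes using the documented 'namespace==<namespace>'
// filter form and prints each received envelope's namespace and topic.
// context, grpc and fmt are already imported by the generated file.
func watchNamespace(ctx context.Context, conn *grpc.ClientConn, ns string) error {
	client := NewEventsClient(conn)
	stream, err := client.Subscribe(ctx, &SubscribeRequest{
		Filters: []string{"namespace==" + ns},
	})
	if err != nil {
		return err
	}
	for {
		env, err := stream.Recv()
		if err != nil {
			return err
		}
		fmt.Println(env.Namespace, env.Topic)
	}
}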
@ -97,6 +157,15 @@ func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...
return out, nil
}
func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
out := new(google_protobuf2.Empty)
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
if err != nil {
@ -132,7 +201,24 @@ func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
// Server API for Events service
type EventsServer interface {
// Publish an event to a topic.
//
// The event will be packed into a timestamp envelope with the namespace
// introspected from the context. The envelope will then be dispatched.
Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
// Forward sends an event that has already been packaged into an envelope
// with a timestamp and namespace.
//
// This is useful if earlier timestamping is required or when forwarding on
// behalf of another component, namespace or publisher.
Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
// Subscribe to a stream of events, possibly returning only those that match any
// of the provided filters.
//
// Unlike many other methods in containerd, subscribers will get messages
// from all namespaces unless otherwise specified. If this is not desired,
// a filter can be provided in the format 'namespace==<namespace>' to
// restrict the received events.
Subscribe(*SubscribeRequest, Events_SubscribeServer) error
}
@ -158,6 +244,24 @@ func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(inte
return interceptor(ctx, in, info, handler)
}
func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ForwardRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EventsServer).Forward(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.services.events.v1.Events/Forward",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SubscribeRequest)
if err := stream.RecvMsg(m); err != nil {
@ -187,6 +291,10 @@ var _Events_serviceDesc = grpc.ServiceDesc{
MethodName: "Publish",
Handler: _Events_Publish_Handler,
},
{
MethodName: "Forward",
Handler: _Events_Forward_Handler,
},
},
Streams: []grpc.StreamDesc{
{
@ -198,6 +306,68 @@ var _Events_serviceDesc = grpc.ServiceDesc{
Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto",
}
func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Topic) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
i += copy(dAtA[i:], m.Topic)
}
if m.Event != nil {
dAtA[i] = 0x12
i++
i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
n1, err := m.Event.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Envelope != nil {
dAtA[i] = 0xa
i++
i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
n2, err := m.Envelope.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -231,34 +401,6 @@ func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Envelope != nil {
dAtA[i] = 0xa
i++
i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
n1, err := m.Envelope.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *Envelope) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -277,11 +419,11 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n2
i += n3
if len(m.Namespace) > 0 {
dAtA[i] = 0x12
i++
@ -298,11 +440,11 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
n3, err := m.Event.MarshalTo(dAtA[i:])
n4, err := m.Event.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n3
i += n4
}
return i, nil
}
@ -334,6 +476,30 @@ func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *PublishRequest) Size() (n int) {
var l int
_ = l
l = len(m.Topic)
if l > 0 {
n += 1 + l + sovEvents(uint64(l))
}
if m.Event != nil {
l = m.Event.Size()
n += 1 + l + sovEvents(uint64(l))
}
return n
}
func (m *ForwardRequest) Size() (n int) {
var l int
_ = l
if m.Envelope != nil {
l = m.Envelope.Size()
n += 1 + l + sovEvents(uint64(l))
}
return n
}
func (m *SubscribeRequest) Size() (n int) {
var l int
_ = l
@ -346,16 +512,6 @@ func (m *SubscribeRequest) Size() (n int) {
return n
}
func (m *PublishRequest) Size() (n int) {
var l int
_ = l
if m.Envelope != nil {
l = m.Envelope.Size()
n += 1 + l + sovEvents(uint64(l))
}
return n
}
func (m *Envelope) Size() (n int) {
var l int
_ = l
@ -389,6 +545,27 @@ func sovEvents(x uint64) (n int) {
func sozEvents(x uint64) (n int) {
return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *PublishRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PublishRequest{`,
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
`}`,
}, "")
return s
}
func (this *ForwardRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ForwardRequest{`,
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
`}`,
}, "")
return s
}
func (this *SubscribeRequest) String() string {
if this == nil {
return "nil"
@ -399,16 +576,6 @@ func (this *SubscribeRequest) String() string {
}, "")
return s
}
func (this *PublishRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PublishRequest{`,
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
`}`,
}, "")
return s
}
func (this *Envelope) String() string {
if this == nil {
return "nil"
@ -430,6 +597,201 @@ func valueToStringEvents(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *PublishRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvents
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Topic = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEvents
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Event == nil {
m.Event = &google_protobuf1.Any{}
}
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvents(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEvents
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEvents
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Envelope == nil {
m.Envelope = &Envelope{}
}
if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvents(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEvents
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -509,89 +871,6 @@ func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *PublishRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvents
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEvents
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Envelope == nil {
m.Envelope = &Envelope{}
}
if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvents(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthEvents
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Envelope) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -873,31 +1152,34 @@ func init() {
}
var fileDescriptorEvents = []byte{
// 407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0xc7, 0xb3, 0x84, 0x7c, 0x78, 0x91, 0x10, 0x5a, 0x45, 0xc8, 0x18, 0x70, 0xa2, 0x5c, 0x88,
0x10, 0xec, 0x92, 0x70, 0x44, 0x42, 0x22, 0x90, 0x7b, 0x64, 0x40, 0x42, 0xdc, 0x6c, 0x77, 0xe2,
0xac, 0x64, 0x7b, 0x5d, 0xef, 0xda, 0x52, 0x6e, 0x7d, 0x84, 0x3e, 0x49, 0x5f, 0xa2, 0x97, 0x1c,
0x7b, 0xec, 0xa9, 0x6d, 0xfc, 0x24, 0x55, 0xfc, 0x91, 0xb4, 0x89, 0xd4, 0x54, 0xbd, 0xcd, 0xec,
0xff, 0x37, 0x3b, 0xfb, 0x9f, 0x59, 0xfc, 0xcb, 0xe3, 0x6a, 0x9e, 0x38, 0xd4, 0x15, 0x01, 0x73,
0x45, 0xa8, 0x6c, 0x1e, 0x42, 0x7c, 0x74, 0x37, 0xb4, 0x23, 0xce, 0x24, 0xc4, 0x29, 0x77, 0x41,
0x32, 0x48, 0x21, 0x54, 0x92, 0xa5, 0xc3, 0x32, 0xa2, 0x51, 0x2c, 0x94, 0x20, 0xef, 0xb7, 0x3c,
0xad, 0x58, 0x5a, 0x12, 0xe9, 0xd0, 0xe8, 0x78, 0xc2, 0x13, 0x39, 0xc9, 0xd6, 0x51, 0x51, 0x64,
0xbc, 0xf1, 0x84, 0xf0, 0x7c, 0x60, 0x79, 0xe6, 0x24, 0x33, 0x66, 0x87, 0x8b, 0x52, 0x7a, 0xbb,
0x2b, 0x41, 0x10, 0xa9, 0x4a, 0xec, 0xee, 0x8a, 0x8a, 0x07, 0x20, 0x95, 0x1d, 0x44, 0x05, 0xd0,
0xff, 0x84, 0x5f, 0xfd, 0x4e, 0x1c, 0xe9, 0xc6, 0xdc, 0x01, 0x0b, 0x8e, 0x13, 0x90, 0x8a, 0xe8,
0xb8, 0x35, 0xe3, 0xbe, 0x82, 0x58, 0xea, 0xa8, 0x57, 0x1f, 0x68, 0x56, 0x95, 0xf6, 0xff, 0xe2,
0x97, 0xd3, 0xc4, 0xf1, 0xb9, 0x9c, 0x57, 0xec, 0x4f, 0xdc, 0x86, 0x30, 0x05, 0x5f, 0x44, 0xa0,
0xa3, 0x1e, 0x1a, 0xbc, 0x18, 0x7d, 0xa0, 0x0f, 0x1a, 0xa4, 0x93, 0x12, 0xb7, 0x36, 0x85, 0xfd,
0x33, 0x84, 0xdb, 0xd5, 0x31, 0x19, 0x63, 0x6d, 0xf3, 0xc8, 0xf2, 0x4a, 0x83, 0x16, 0x36, 0x68,
0x65, 0x83, 0xfe, 0xa9, 0x88, 0x71, 0x7b, 0x79, 0xd5, 0xad, 0x9d, 0x5e, 0x77, 0x91, 0xb5, 0x2d,
0x23, 0xef, 0xb0, 0x16, 0xda, 0x01, 0xc8, 0xc8, 0x76, 0x41, 0x7f, 0xd6, 0x43, 0x03, 0xcd, 0xda,
0x1e, 0x90, 0x0e, 0x6e, 0x28, 0x11, 0x71, 0x57, 0xaf, 0xe7, 0x4a, 0x91, 0x90, 0x8f, 0xb8, 0x91,
0x3f, 0x52, 0x7f, 0x9e, 0xf7, 0xec, 0xec, 0xf5, 0xfc, 0x11, 0x2e, 0xac, 0x02, 0x19, 0x9d, 0x23,
0xdc, 0x9c, 0xe4, 0x8e, 0xc8, 0x14, 0xb7, 0xca, 0x91, 0x90, 0xcf, 0x07, 0x9c, 0xdf, 0x1f, 0x9d,
0xf1, 0x7a, 0xaf, 0xc3, 0x64, 0xbd, 0x39, 0xe2, 0x61, 0x6d, 0xb3, 0x12, 0xc2, 0x0e, 0xdc, 0xb9,
0xbb, 0x3c, 0xe3, 0xb1, 0xe3, 0xff, 0x82, 0xc6, 0xff, 0x96, 0x2b, 0xb3, 0x76, 0xb9, 0x32, 0x6b,
0x27, 0x99, 0x89, 0x96, 0x99, 0x89, 0x2e, 0x32, 0x13, 0xdd, 0x64, 0x26, 0xfa, 0xff, 0xfd, 0x89,
0x3f, 0xfd, 0x5b, 0x11, 0x39, 0xcd, 0xdc, 0xd2, 0xd7, 0xdb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13,
0x35, 0xd0, 0x60, 0x32, 0x03, 0x00, 0x00,
// 462 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x84, 0xd8, 0x25, 0xe9, 0x6d,
0x6a, 0x29, 0xb1, 0x4d, 0xec, 0x04, 0xcd, 0x6e, 0x1e, 0x81, 0x0d, 0x6f, 0xc2, 0x86, 0x37, 0xe8,
0x92, 0x25, 0x2b, 0x60, 0xfa, 0x24, 0xa8, 0x89, 0xdd, 0x30, 0x2d, 0x10, 0x34, 0xbb, 0x6b, 0xdf,
0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x3c, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
0x4c, 0x05, 0x94, 0x41, 0x36, 0xfd, 0xbd, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0x67, 0x9e, 0x48, 0xf2,
0x98, 0x32, 0x6f, 0x46, 0x21, 0x99, 0x8a, 0x40, 0xcd, 0xab, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
0x96, 0xde, 0xaa, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
0x9b, 0x9b, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x66, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0xb9, 0x0f, 0xef, 0x73, 0x90, 0xca, 0xee,
0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0x0f, 0x41, 0x36, 0x35, 0xcc, 0xe7, 0xb8,
0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0x9f, 0x46, 0x92, 0x89, 0x96,
0xfb, 0xeb, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
0x3e, 0x9c, 0xd1, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x46,
0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0xeb, 0xc1, 0xf5, 0x07, 0x38, 0x5b, 0x13, 0xbc, 0x36, 0x8a,
0x71, 0x7b, 0xf1, 0xbd, 0xd7, 0xfa, 0xf8, 0xa3, 0x87, 0xfc, 0xfa, 0x98, 0x7d, 0x0b, 0x5b, 0x2c,
0x48, 0x41, 0x8a, 0x20, 0x82, 0xd2, 0x05, 0xcb, 0xaf, 0x37, 0x6a, 0xd7, 0x76, 0xff, 0xe8, 0xda,
0x5e, 0xa3, 0x6b, 0x8f, 0xf6, 0xce, 0xbf, 0xf4, 0xd0, 0xe8, 0xd3, 0x0e, 0x3e, 0x98, 0x94, 0x2e,
0xd8, 0xa7, 0xf8, 0x50, 0x47, 0x63, 0xdf, 0x6f, 0x70, 0xeb, 0x72, 0x84, 0xce, 0xf5, 0xad, 0x7b,
0x26, 0xab, 0x37, 0xb1, 0x22, 0xea, 0x60, 0x1a, 0x89, 0x97, 0x03, 0xfc, 0x2b, 0x31, 0xc6, 0xd6,
0x3a, 0x13, 0xdb, 0x6b, 0x60, 0x6e, 0xa6, 0xe7, 0xfc, 0xef, 0x23, 0x78, 0x80, 0xc6, 0x6f, 0x17,
0x17, 0x6e, 0xeb, 0xdb, 0x85, 0xdb, 0x3a, 0x5f, 0xba, 0x68, 0xb1, 0x74, 0xd1, 0xd7, 0xa5, 0x8b,
0x7e, 0x2e, 0x5d, 0xf4, 0xee, 0xc9, 0x15, 0xff, 0xeb, 0xc7, 0x55, 0x15, 0x1e, 0x94, 0x23, 0x3d,
0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x38, 0x37, 0x72, 0x20, 0x04, 0x00, 0x00,
}

View File

@ -2,6 +2,7 @@ syntax = "proto3";
package containerd.services.events.v1;
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
@ -10,19 +11,44 @@ import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
service Events {
// Publish an event to a topic.
//
// The event will be packed into a timestamp envelope with the namespace
// introspected from the context. The envelope will then be dispatched.
rpc Publish(PublishRequest) returns (google.protobuf.Empty);
// Forward sends an event that has already been packaged into an envelope
// with a timestamp and namespace.
//
// This is useful if earlier timestamping is required or when forwarding on
// behalf of another component, namespace or publisher.
rpc Forward(ForwardRequest) returns (google.protobuf.Empty);
// Subscribe to a stream of events, possibly returning only those that match
// any of the provided filters.
//
// Unlike many other methods in containerd, subscribers will get messages
// from all namespaces unless otherwise specified. If this is not desired,
// a filter can be provided in the format 'namespace==<namespace>' to
// restrict the received events.
rpc Subscribe(SubscribeRequest) returns (stream Envelope);
}
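A minimal sketch of a subscriber against this service, assuming the standard generated gRPC client (NewEventsClient / Subscribe) and a local containerd socket; the import paths, socket path, dial options, and namespace value are illustrative assumptions and not part of this diff:
package main
import (
	"context"
	"log"
	"net"
	"time"
	events "github.com/containerd/containerd/api/services/events/v1"
	"google.golang.org/grpc"
)
func main() {
	// Dial the local containerd socket (path and dial options are assumptions).
	conn, err := grpc.Dial("/run/containerd/containerd.sock",
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := events.NewEventsClient(conn)
	// Restrict the stream to a single namespace using the
	// 'namespace==<namespace>' filter format described above.
	stream, err := client.Subscribe(context.Background(), &events.SubscribeRequest{
		Filters: []string{"namespace==k8s.io"},
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		env, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("%v %s %s", env.Timestamp, env.Namespace, env.Topic)
	}
}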
message PublishRequest {
string topic = 1;
google.protobuf.Any event = 2;
}
message ForwardRequest {
Envelope envelope = 1;
}
message SubscribeRequest {
repeated string filters = 1;
}
message PublishRequest {
Envelope envelope = 1;
}
message Envelope {
option (containerd.plugin.fieldpath) = true;
google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string namespace = 2;
string topic = 3;

View File

@ -7,6 +7,7 @@ package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
@ -50,6 +51,64 @@ func init() {
proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
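A brief sketch of how these generated accessors are used: a filter expression such as "name" or "labels.foo" is split on "." and the parts are handed to Field. The fragment assumes it lives alongside the generated types above; the values are illustrative only.
package events
import "fmt"
// exampleFieldLookup shows how a filter expression such as "name" or
// "labels.foo" resolves against an event via the generated accessor.
func exampleFieldLookup() {
	ev := &ImageCreate{
		Name:   "docker.io/library/busybox:latest",
		Labels: map[string]string{"foo": "bar"},
	}
	if v, ok := ev.Field([]string{"name"}); ok {
		fmt.Println(v) // prints the image name
	}
	// "labels.foo" is split on "." before the lookup.
	if v, ok := ev.Field([]string{"labels", "foo"}); ok {
		fmt.Println(v) // prints "bar"
	}
}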
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageUpdate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -881,22 +940,24 @@ func init() {
}
var fileDescriptorImage = []byte{
// 263 bytes of a gzipped FileDescriptorProto
// 296 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea,
0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14,
0xeb, 0x95, 0x19, 0x2a, 0xad, 0x61, 0xe4, 0xe2, 0xf6, 0x04, 0xf1, 0x9c, 0x8b, 0x52, 0x13, 0x4b,
0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83,
0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66,
0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0x66, 0xea, 0x21, 0x99, 0xa7, 0xe7, 0x03, 0xd6, 0xe8, 0x9a,
0x57, 0x52, 0x54, 0x19, 0x04, 0x35, 0x45, 0xca, 0x92, 0x8b, 0x1b, 0x49, 0x58, 0x48, 0x80, 0x8b,
0x39, 0x3b, 0xb5, 0x12, 0x6a, 0x23, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a,
0x2a, 0xc1, 0x04, 0x16, 0x83, 0x70, 0xac, 0x98, 0x2c, 0x18, 0x11, 0xce, 0x0d, 0x2d, 0x48, 0xa1,
0xaa, 0x73, 0x21, 0xe6, 0x51, 0xdb, 0xb9, 0x8a, 0x50, 0xd7, 0xba, 0xa4, 0xe6, 0xa4, 0x62, 0x77,
0xad, 0x53, 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c,
0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, 0xec, 0xc8, 0x8c,
0x7e, 0x6b, 0x08, 0x2b, 0x89, 0x0d, 0x9c, 0x00, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
0x7c, 0x2c, 0x4a, 0x47, 0x02, 0x00, 0x00,
0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x03, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e,
0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02,
0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84,
0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21,
0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23,
0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8,
0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a,
0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09,
0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42,
0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x98,
0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c,
0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x47,
0x66, 0x22, 0xb2, 0x86, 0xb0, 0x92, 0xd8, 0xc0, 0xb1, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
0x44, 0x99, 0x59, 0x31, 0x8d, 0x02, 0x00, 0x00,
}

View File

@ -2,7 +2,10 @@ syntax = "proto3";
package containerd.services.images.v1;
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message ImageCreate {
string name = 1;

View File

@ -8,6 +8,7 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
@ -51,6 +52,64 @@ func init() {
proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate")
proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -882,23 +941,25 @@ func init() {
}
var fileDescriptorNamespace = []byte{
// 277 bytes of a gzipped FileDescriptorProto
// 310 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17,
0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94,
0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83,
0x58, 0x10, 0x4d, 0x4a, 0x5b, 0x18, 0xb9, 0xf8, 0xfd, 0x60, 0x06, 0x39, 0x17, 0xa5, 0x26, 0x96,
0xa4, 0x0a, 0x09, 0x71, 0xb1, 0x80, 0xcc, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3,
0x85, 0x82, 0xb8, 0xd8, 0x72, 0x12, 0x93, 0x52, 0x73, 0x8a, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8,
0x8d, 0xac, 0xf4, 0xf0, 0xda, 0xa6, 0x87, 0x66, 0xa6, 0x9e, 0x0f, 0x58, 0xb3, 0x6b, 0x5e, 0x49,
0x51, 0x65, 0x10, 0xd4, 0x24, 0x29, 0x4b, 0x2e, 0x6e, 0x24, 0x61, 0x21, 0x01, 0x2e, 0xe6, 0xec,
0xd4, 0x4a, 0xa8, 0xad, 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04,
0x13, 0x58, 0x0c, 0xc2, 0xb1, 0x62, 0xb2, 0x60, 0x44, 0x75, 0x76, 0x68, 0x41, 0x0a, 0xd5, 0x9d,
0x0d, 0x31, 0x93, 0xda, 0xce, 0x56, 0x45, 0x72, 0xb5, 0x4b, 0x6a, 0x4e, 0x2a, 0x76, 0x57, 0x3b,
0x45, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x8e, 0xcc, 0xc4, 0x62,
0x0d, 0x61, 0x25, 0xb1, 0x81, 0x63, 0xdd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xf0, 0x68,
0xa6, 0x75, 0x02, 0x00, 0x00,
0x58, 0x10, 0x4d, 0x52, 0x0e, 0x04, 0x6d, 0x07, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29,
0x4d, 0xcf, 0xcc, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0,
0xb4, 0x85, 0x91, 0x8b, 0xdf, 0x0f, 0xe6, 0x14, 0xe7, 0xa2, 0xd4, 0xc4, 0x92, 0x54, 0x21, 0x21,
0x2e, 0x16, 0x90, 0xeb, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0xa1, 0x20, 0x2e,
0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x2b, 0x3d,
0xbc, 0xee, 0xd5, 0x43, 0x33, 0x53, 0xcf, 0x07, 0xac, 0xd9, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08,
0x6a, 0x92, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4,
0x56, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06,
0xe1, 0x58, 0x31, 0x59, 0x30, 0xa2, 0x3a, 0x3b, 0xb4, 0x20, 0x85, 0xea, 0xce, 0x86, 0x98, 0x49,
0x6d, 0x67, 0xab, 0x22, 0xb9, 0xda, 0x25, 0x35, 0x27, 0x15, 0xbb, 0xab, 0x9d, 0x62, 0x4e, 0x3c,
0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17,
0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xb8, 0xe0, 0x8b, 0x1c, 0x63, 0x94, 0x1d, 0x99, 0x49,
0xce, 0x1a, 0xc2, 0x4a, 0x62, 0x03, 0xc7, 0xbc, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x58, 0x7e,
0x6c, 0xc6, 0xbb, 0x02, 0x00, 0x00,
}

View File

@ -3,8 +3,10 @@ syntax = "proto3";
package containerd.services.events.v1;
import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message NamespaceCreate {
string name = 1;

View File

@ -7,6 +7,7 @@ package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
@ -49,6 +50,52 @@ func init() {
proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit")
proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
case "parent":
return string(m.Parent), len(m.Parent) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
}
return "", false
}
func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -656,19 +703,21 @@ func init() {
}
var fileDescriptorSnapshot = []byte{
// 219 bytes of a gzipped FileDescriptorProto
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67,
0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb,
0x41, 0x54, 0xeb, 0x95, 0x19, 0x2a, 0x59, 0x73, 0xf1, 0x07, 0x43, 0x35, 0x04, 0x14, 0xa5, 0x16,
0x24, 0x16, 0xa5, 0x0a, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70,
0x06, 0x81, 0x98, 0x42, 0x62, 0x5c, 0x6c, 0x20, 0x99, 0xbc, 0x12, 0x09, 0x26, 0xb0, 0x20, 0x94,
0xa7, 0x64, 0xc6, 0xc5, 0x07, 0xd3, 0xec, 0x9c, 0x9f, 0x9b, 0x9b, 0x59, 0x82, 0x45, 0xaf, 0x10,
0x17, 0x4b, 0x5e, 0x62, 0x6e, 0x2a, 0x54, 0x27, 0x98, 0xad, 0xa4, 0x84, 0xd0, 0x17, 0x94, 0x9a,
0x9b, 0x5f, 0x86, 0xc5, 0x4e, 0xa7, 0x88, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68,
0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
0x46, 0xd9, 0x91, 0x19, 0x32, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0x38, 0x60, 0x8c, 0x01, 0x01, 0x00,
0x00, 0xff, 0xff, 0x10, 0x4c, 0x3d, 0xb2, 0x62, 0x01, 0x00, 0x00,
0x41, 0x54, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x06, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e,
0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06,
0xc4, 0x02, 0x25, 0x6b, 0x2e, 0xfe, 0x60, 0xa8, 0x95, 0x01, 0x45, 0xa9, 0x05, 0x89, 0x45, 0xa9,
0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6,
0x90, 0x18, 0x17, 0x1b, 0x48, 0x26, 0xaf, 0x44, 0x82, 0x09, 0x2c, 0x08, 0xe5, 0x29, 0x99, 0x71,
0xf1, 0xc1, 0x34, 0x3b, 0xe7, 0xe7, 0xe6, 0x66, 0x96, 0x60, 0xd1, 0x2b, 0xc4, 0xc5, 0x92, 0x97,
0x98, 0x9b, 0x0a, 0xd5, 0x09, 0x66, 0x2b, 0x29, 0x21, 0xf4, 0x05, 0xa5, 0xe6, 0xe6, 0x97, 0x61,
0xb1, 0xd3, 0x29, 0xe6, 0xc4, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31,
0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8,
0x31, 0x46, 0xd9, 0x91, 0x19, 0xbe, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0xd8, 0xf7, 0xc6, 0x80, 0x00,
0x00, 0x00, 0xff, 0xff, 0x3a, 0x82, 0x7a, 0xa7, 0xa8, 0x01, 0x00, 0x00,
}

View File

@ -2,7 +2,10 @@ syntax = "proto3";
package containerd.services.events.v1;
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message SnapshotPrepare {
string key = 1;

View File

@ -10,6 +10,7 @@ import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/gogo/protobuf/types"
import containerd_types "github.com/containerd/containerd/api/types"
import _ "github.com/containerd/containerd/protobuf/plugin"
import time "time"
@ -93,20 +94,29 @@ func (*TaskOOM) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{
type TaskExecAdded struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
}
func (m *TaskExecAdded) Reset() { *m = TaskExecAdded{} }
func (*TaskExecAdded) ProtoMessage() {}
func (*TaskExecAdded) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{6} }
type TaskExecStarted struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
}
func (m *TaskExecStarted) Reset() { *m = TaskExecStarted{} }
func (*TaskExecStarted) ProtoMessage() {}
func (*TaskExecStarted) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{7} }
type TaskPaused struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
}
func (m *TaskPaused) Reset() { *m = TaskPaused{} }
func (*TaskPaused) ProtoMessage() {}
func (*TaskPaused) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{7} }
func (*TaskPaused) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{8} }
type TaskResumed struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -114,7 +124,7 @@ type TaskResumed struct {
func (m *TaskResumed) Reset() { *m = TaskResumed{} }
func (*TaskResumed) ProtoMessage() {}
func (*TaskResumed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{8} }
func (*TaskResumed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{9} }
type TaskCheckpointed struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -123,7 +133,7 @@ type TaskCheckpointed struct {
func (m *TaskCheckpointed) Reset() { *m = TaskCheckpointed{} }
func (*TaskCheckpointed) ProtoMessage() {}
func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{9} }
func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} }
func init() {
proto.RegisterType((*TaskCreate)(nil), "containerd.services.events.v1.TaskCreate")
@ -133,10 +143,205 @@ func init() {
proto.RegisterType((*TaskExit)(nil), "containerd.services.events.v1.TaskExit")
proto.RegisterType((*TaskOOM)(nil), "containerd.services.events.v1.TaskOOM")
proto.RegisterType((*TaskExecAdded)(nil), "containerd.services.events.v1.TaskExecAdded")
proto.RegisterType((*TaskExecStarted)(nil), "containerd.services.events.v1.TaskExecStarted")
proto.RegisterType((*TaskPaused)(nil), "containerd.services.events.v1.TaskPaused")
proto.RegisterType((*TaskResumed)(nil), "containerd.services.events.v1.TaskResumed")
proto.RegisterType((*TaskCheckpointed)(nil), "containerd.services.events.v1.TaskCheckpointed")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: rootfs
// unhandled: pid
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
case "bundle":
return string(m.Bundle), len(m.Bundle) > 0
case "io":
// NOTE(stevvooe): This is probably not correct in many cases.
// We assume that the target message also implements the Field
// method, which isn't likely true in a lot of cases.
//
// If you have a broken build and have found this comment,
// you may be closer to a solution.
if m.IO == nil {
return "", false
}
return m.IO.Field(fieldpath[1:])
case "checkpoint":
return string(m.Checkpoint), len(m.Checkpoint) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskStart) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: pid
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: pid
// unhandled: exit_status
// unhandled: exited_at
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskIO) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "stdin":
return string(m.Stdin), len(m.Stdin) > 0
case "stdout":
return string(m.Stdout), len(m.Stdout) > 0
case "stderr":
return string(m.Stderr), len(m.Stderr) > 0
case "terminal":
return fmt.Sprint(m.Terminal), true
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskExit) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: pid
// unhandled: exit_status
// unhandled: exited_at
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
case "id":
return string(m.ID), len(m.ID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskOOM) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
case "exec_id":
return string(m.ExecID), len(m.ExecID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
// unhandled: pid
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
case "exec_id":
return string(m.ExecID), len(m.ExecID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskPaused) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskResumed) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "container_id":
return string(m.ContainerID), len(m.ContainerID) > 0
case "checkpoint":
return string(m.Checkpoint), len(m.Checkpoint) > 0
}
return "", false
}
func (m *TaskCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -400,6 +605,36 @@ func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) {
}
func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ContainerID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
i += copy(dAtA[i:], m.ContainerID)
}
if len(m.ExecID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
i += copy(dAtA[i:], m.ExecID)
}
return i, nil
}
func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
@ -645,6 +880,20 @@ func (m *TaskOOM) Size() (n int) {
}
func (m *TaskExecAdded) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
l = len(m.ExecID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
return n
}
func (m *TaskExecStarted) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
@ -789,6 +1038,17 @@ func (this *TaskExecAdded) String() string {
return "nil"
}
s := strings.Join([]string{`&TaskExecAdded{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
`}`,
}, "")
return s
}
func (this *TaskExecStarted) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TaskExecStarted{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
@ -1741,6 +2001,114 @@ func (m *TaskExecAdded) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: TaskExecAdded: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExecID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTask(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTask
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *TaskExecStarted) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TaskExecStarted: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TaskExecStarted: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
@ -2215,43 +2583,46 @@ func init() {
}
var fileDescriptorTask = []byte{
// 607 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
0x10, 0xc6, 0x6b, 0xa7, 0x75, 0x93, 0x09, 0x15, 0x95, 0x55, 0x41, 0x14, 0x09, 0x3b, 0x32, 0x42,
0xca, 0xc9, 0x56, 0x8b, 0xc4, 0x05, 0x15, 0x91, 0x34, 0x1c, 0x72, 0xa8, 0x02, 0x6e, 0x0f, 0x88,
0x4b, 0xe4, 0xd8, 0xd3, 0x74, 0x69, 0xe2, 0x8d, 0xbc, 0xe3, 0xa8, 0x70, 0xe2, 0x11, 0x78, 0x1a,
0x9e, 0xa1, 0x07, 0x0e, 0x1c, 0x39, 0x05, 0xea, 0xc7, 0xe0, 0x84, 0xd6, 0xeb, 0xa4, 0xe1, 0x5f,
0x85, 0x7c, 0xdb, 0x19, 0x7f, 0x3b, 0xfe, 0xe6, 0xe7, 0xf1, 0x40, 0x77, 0xcc, 0xe8, 0x3c, 0x1d,
0xb9, 0x21, 0x9f, 0x7a, 0x21, 0x8f, 0x29, 0x60, 0x31, 0x26, 0xd1, 0xfa, 0x31, 0x98, 0x31, 0x4f,
0x60, 0x32, 0x67, 0x21, 0x0a, 0x0f, 0xe7, 0x18, 0x93, 0xf0, 0xe6, 0xfb, 0x1e, 0x05, 0xe2, 0xc2,
0x9d, 0x25, 0x9c, 0xb8, 0xf9, 0xe0, 0x46, 0xed, 0x2e, 0x95, 0xae, 0x52, 0xba, 0xf3, 0xfd, 0xe6,
0xde, 0x98, 0x8f, 0x79, 0xae, 0xf4, 0xe4, 0x49, 0x5d, 0x6a, 0xda, 0x63, 0xce, 0xc7, 0x13, 0xf4,
0xf2, 0x68, 0x94, 0x9e, 0x79, 0xc4, 0xa6, 0x28, 0x28, 0x98, 0xce, 0x0a, 0xc1, 0x93, 0xff, 0x72,
0x46, 0xef, 0x66, 0x28, 0xbc, 0x29, 0x4f, 0x63, 0x52, 0xf7, 0x9c, 0x1f, 0x1a, 0xc0, 0x69, 0x20,
0x2e, 0x8e, 0x12, 0x0c, 0x08, 0xcd, 0x03, 0xb8, 0xb3, 0xba, 0x32, 0x64, 0x51, 0x43, 0x6b, 0x69,
0xed, 0x5a, 0xf7, 0x6e, 0xb6, 0xb0, 0xeb, 0x47, 0xcb, 0x7c, 0xbf, 0xe7, 0xd7, 0x57, 0xa2, 0x7e,
0x64, 0xde, 0x03, 0x63, 0x94, 0xc6, 0xd1, 0x04, 0x1b, 0xba, 0x54, 0xfb, 0x45, 0x64, 0x7a, 0x60,
0x24, 0x9c, 0xd3, 0x99, 0x68, 0x54, 0x5a, 0x95, 0x76, 0xfd, 0xe0, 0xbe, 0xbb, 0xd6, 0x79, 0xee,
0xc4, 0x3d, 0x96, 0x4e, 0xfc, 0x42, 0x66, 0x1e, 0x82, 0xce, 0x78, 0x63, 0xb3, 0xa5, 0xb5, 0xeb,
0x07, 0x8f, 0xdc, 0x5b, 0x31, 0xb9, 0xd2, 0x73, 0x7f, 0xd0, 0x35, 0xb2, 0x85, 0xad, 0xf7, 0x07,
0xbe, 0xce, 0xb8, 0x69, 0x01, 0x84, 0xe7, 0x18, 0x5e, 0xcc, 0x38, 0x8b, 0xa9, 0xb1, 0x95, 0x7b,
0x59, 0xcb, 0x98, 0xbb, 0x50, 0x99, 0xb1, 0xa8, 0x61, 0xb4, 0xb4, 0xf6, 0x8e, 0x2f, 0x8f, 0xce,
0x2b, 0xa8, 0xc9, 0x3a, 0x27, 0x14, 0x24, 0x54, 0xaa, 0xf5, 0xa2, 0xa4, 0x7e, 0x53, 0xf2, 0x53,
0xc1, 0xb3, 0x87, 0x13, 0x2c, 0xc9, 0xf3, 0x8f, 0xa2, 0xa6, 0x0d, 0x75, 0xbc, 0x64, 0x34, 0x14,
0x14, 0x50, 0x2a, 0x71, 0xca, 0x27, 0x20, 0x53, 0x27, 0x79, 0xc6, 0xec, 0x40, 0x4d, 0x46, 0x18,
0x0d, 0x03, 0x2a, 0x00, 0x36, 0x5d, 0x35, 0x32, 0xee, 0x72, 0x64, 0xdc, 0xd3, 0xe5, 0xc8, 0x74,
0xab, 0x57, 0x0b, 0x7b, 0xe3, 0xe3, 0x37, 0x5b, 0xf3, 0xab, 0xea, 0x5a, 0x87, 0x9c, 0xb7, 0x60,
0x28, 0xa6, 0xe6, 0x1e, 0x6c, 0x09, 0x8a, 0x58, 0xac, 0xcc, 0xfa, 0x2a, 0x90, 0x5f, 0x59, 0x50,
0xc4, 0x53, 0x5a, 0x7e, 0x65, 0x15, 0x15, 0x79, 0x4c, 0x92, 0xdc, 0x96, 0xca, 0x63, 0x92, 0x98,
0x4d, 0xa8, 0x12, 0x26, 0x53, 0x16, 0x07, 0x93, 0xdc, 0x51, 0xd5, 0x5f, 0xc5, 0xce, 0x67, 0x0d,
0xaa, 0xf2, 0x65, 0x2f, 0x2e, 0x19, 0x95, 0x1c, 0x39, 0xbd, 0x20, 0x54, 0x2b, 0x46, 0xa0, 0xe7,
0xeb, 0x6c, 0x85, 0xae, 0xf2, 0x4f, 0x74, 0x9b, 0xb7, 0xa3, 0xdb, 0x2a, 0x85, 0xee, 0x10, 0xb6,
0x65, 0x37, 0x83, 0xc1, 0x71, 0x99, 0x66, 0x9c, 0xf7, 0xb0, 0xa3, 0x60, 0x60, 0xd8, 0x89, 0x22,
0x8c, 0x4a, 0x11, 0x79, 0x08, 0xdb, 0x78, 0x89, 0xe1, 0x70, 0x85, 0x05, 0xb2, 0x85, 0x6d, 0xc8,
0x9a, 0xfd, 0x9e, 0x6f, 0xc8, 0x47, 0xfd, 0xbf, 0xe0, 0x71, 0x9e, 0xab, 0x69, 0x7d, 0x19, 0xa4,
0xa2, 0xdc, 0x8b, 0x9d, 0x0e, 0xd4, 0x65, 0x05, 0x1f, 0x45, 0x3a, 0x2d, 0x59, 0xe2, 0x0c, 0x76,
0xf3, 0x15, 0xb4, 0xfa, 0x55, 0x4b, 0x32, 0xf8, 0x75, 0x01, 0xe8, 0xbf, 0x2f, 0x80, 0xee, 0xeb,
0xab, 0x6b, 0x6b, 0xe3, 0xeb, 0xb5, 0xb5, 0xf1, 0x21, 0xb3, 0xb4, 0xab, 0xcc, 0xd2, 0xbe, 0x64,
0x96, 0xf6, 0x3d, 0xb3, 0xb4, 0x37, 0xcf, 0x4a, 0xee, 0xf5, 0xa7, 0xea, 0x34, 0x32, 0xf2, 0x49,
0x79, 0xfc, 0x33, 0x00, 0x00, 0xff, 0xff, 0xbd, 0xfa, 0x67, 0x90, 0x20, 0x06, 0x00, 0x00,
// 648 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0x86, 0x6b, 0xa7, 0x75, 0x93, 0x0d, 0x55, 0x2b, 0xab, 0x82, 0x28, 0x12, 0x76, 0x64, 0x84,
0x94, 0x93, 0xad, 0x16, 0x89, 0x0b, 0x2a, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x0a, 0xb8, 0x3d, 0x21,
0xa4, 0xc8, 0xb1, 0x27, 0xc9, 0xd2, 0xc4, 0x6b, 0x79, 0xc7, 0x51, 0x91, 0x38, 0xf0, 0x08, 0x3c,
0x02, 0x4f, 0xc1, 0x33, 0xf4, 0xc0, 0x81, 0x23, 0xa7, 0x40, 0xfd, 0x0c, 0x9c, 0x38, 0xa1, 0xf5,
0x3a, 0x6e, 0xa1, 0xa2, 0x20, 0x4b, 0xdc, 0x76, 0xc6, 0x33, 0xff, 0xcc, 0x7c, 0x3b, 0xd9, 0x90,
0xee, 0x84, 0xe2, 0x34, 0x19, 0xd9, 0x3e, 0x9b, 0x3b, 0x3e, 0x0b, 0xd1, 0xa3, 0x21, 0xc4, 0xc1,
0xf5, 0xa3, 0x17, 0x51, 0x87, 0x43, 0xbc, 0xa0, 0x3e, 0x70, 0x07, 0x16, 0x10, 0x22, 0x77, 0x16,
0x7b, 0x0e, 0x7a, 0xfc, 0xcc, 0x8e, 0x62, 0x86, 0x4c, 0xbf, 0x7f, 0x15, 0x6d, 0xaf, 0x22, 0x6d,
0x19, 0x69, 0x2f, 0xf6, 0x9a, 0xbb, 0x13, 0x36, 0x61, 0x59, 0xa4, 0x23, 0x4e, 0x32, 0xa9, 0x69,
0x4e, 0x18, 0x9b, 0xcc, 0xc0, 0xc9, 0xac, 0x51, 0x32, 0x76, 0x90, 0xce, 0x81, 0xa3, 0x37, 0x8f,
0xf2, 0x80, 0xc7, 0xff, 0xd4, 0x19, 0xbe, 0x89, 0x80, 0x3b, 0x73, 0x96, 0x84, 0x98, 0xe7, 0x1d,
0xfe, 0x35, 0xaf, 0x28, 0x19, 0xcd, 0x92, 0x09, 0x0d, 0x9d, 0x31, 0x85, 0x59, 0x10, 0x79, 0x38,
0x95, 0x0a, 0xd6, 0x0f, 0x85, 0x90, 0x53, 0x8f, 0x9f, 0x1d, 0xc5, 0xe0, 0x21, 0xe8, 0xfb, 0xe4,
0x4e, 0x91, 0x3c, 0xa4, 0x41, 0x43, 0x69, 0x29, 0xed, 0x5a, 0x77, 0x3b, 0x5d, 0x9a, 0xf5, 0xa3,
0x95, 0xbf, 0xdf, 0x73, 0xeb, 0x45, 0x50, 0x3f, 0xd0, 0xef, 0x12, 0x6d, 0x94, 0x84, 0xc1, 0x0c,
0x1a, 0xaa, 0x88, 0x76, 0x73, 0x4b, 0x77, 0x88, 0x16, 0x33, 0x86, 0x63, 0xde, 0xa8, 0xb4, 0x2a,
0xed, 0xfa, 0xfe, 0x3d, 0xfb, 0x1a, 0xbb, 0x6c, 0x16, 0xfb, 0x58, 0xcc, 0xe2, 0xe6, 0x61, 0xfa,
0x01, 0x51, 0x29, 0x6b, 0xac, 0xb7, 0x94, 0x76, 0x7d, 0xff, 0xa1, 0x7d, 0x2b, 0x68, 0x5b, 0xf4,
0xdc, 0x1f, 0x74, 0xb5, 0x74, 0x69, 0xaa, 0xfd, 0x81, 0xab, 0x52, 0xa6, 0x1b, 0x84, 0xf8, 0x53,
0xf0, 0xcf, 0x22, 0x46, 0x43, 0x6c, 0x6c, 0x64, 0xbd, 0x5c, 0xf3, 0xe8, 0x3b, 0xa4, 0x12, 0xd1,
0xa0, 0xa1, 0xb5, 0x94, 0xf6, 0x96, 0x2b, 0x8e, 0xd6, 0x0b, 0x52, 0x13, 0x3a, 0x27, 0xe8, 0xc5,
0x58, 0x6a, 0xf4, 0x5c, 0x52, 0xbd, 0x92, 0xfc, 0x98, 0xf3, 0xec, 0xc1, 0x0c, 0x4a, 0xf2, 0xbc,
0x21, 0xaa, 0x9b, 0xa4, 0x0e, 0xe7, 0x14, 0x87, 0x1c, 0x3d, 0x4c, 0x04, 0x4e, 0xf1, 0x85, 0x08,
0xd7, 0x49, 0xe6, 0xd1, 0x3b, 0xa4, 0x26, 0x2c, 0x08, 0x86, 0x1e, 0xe6, 0x00, 0x9b, 0xb6, 0x5c,
0x3a, 0x7b, 0xb5, 0x01, 0xf6, 0xe9, 0x6a, 0xe9, 0xba, 0xd5, 0x8b, 0xa5, 0xb9, 0xf6, 0xfe, 0xab,
0xa9, 0xb8, 0x55, 0x99, 0xd6, 0x41, 0xeb, 0x35, 0xd1, 0x24, 0x53, 0x7d, 0x97, 0x6c, 0x70, 0x0c,
0x68, 0x28, 0x9b, 0x75, 0xa5, 0x21, 0x6e, 0x99, 0x63, 0xc0, 0x12, 0x5c, 0xdd, 0xb2, 0xb4, 0x72,
0x3f, 0xc4, 0x71, 0xd6, 0x96, 0xf4, 0x43, 0x1c, 0xeb, 0x4d, 0x52, 0x45, 0x88, 0xe7, 0x34, 0xf4,
0x66, 0x59, 0x47, 0x55, 0xb7, 0xb0, 0xad, 0x4f, 0x0a, 0xa9, 0x8a, 0x62, 0xcf, 0xce, 0x29, 0x96,
0x5c, 0x39, 0x35, 0x27, 0x54, 0xcb, 0x57, 0xa0, 0xe7, 0xaa, 0xb4, 0x40, 0x57, 0xf9, 0x23, 0xba,
0xf5, 0xdb, 0xd1, 0x6d, 0x94, 0x42, 0x77, 0x40, 0x36, 0xc5, 0x34, 0x83, 0xc1, 0x71, 0x99, 0x61,
0xac, 0x29, 0xd9, 0x92, 0x30, 0xc0, 0xef, 0x04, 0x01, 0x04, 0xa5, 0x88, 0x3c, 0x20, 0x9b, 0x70,
0x0e, 0xfe, 0xb0, 0xc0, 0x42, 0xd2, 0xa5, 0xa9, 0x09, 0xcd, 0x7e, 0xcf, 0xd5, 0xc4, 0xa7, 0x7e,
0x60, 0xbd, 0x25, 0xdb, 0xab, 0x4a, 0xd9, 0xce, 0xff, 0xc7, 0x5a, 0x37, 0xaf, 0xc2, 0x3a, 0x94,
0xbf, 0x8c, 0xe7, 0x5e, 0xc2, 0xcb, 0x15, 0xb6, 0x3a, 0xa4, 0x2e, 0x14, 0x5c, 0xe0, 0xc9, 0xbc,
0xa4, 0xc4, 0x98, 0xec, 0x64, 0xcf, 0x5d, 0xf1, 0x2c, 0x94, 0x64, 0xf0, 0xeb, 0x63, 0xa3, 0xfe,
0xfe, 0xd8, 0x74, 0x5f, 0x5d, 0x5c, 0x1a, 0x6b, 0x5f, 0x2e, 0x8d, 0xb5, 0x77, 0xa9, 0xa1, 0x5c,
0xa4, 0x86, 0xf2, 0x39, 0x35, 0x94, 0x6f, 0xa9, 0xa1, 0x7c, 0xf8, 0x6e, 0x28, 0x2f, 0x9f, 0x96,
0xfc, 0x27, 0x7a, 0x22, 0x4f, 0x23, 0x2d, 0xdb, 0xcc, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff,
0x76, 0xdf, 0xe7, 0xaa, 0xd2, 0x06, 0x00, 0x00,
}

View File

@ -5,8 +5,10 @@ package containerd.services.events.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
option (containerd.plugin.fieldpath_all) = true;
message TaskCreate {
string container_id = 1;
@ -51,6 +53,11 @@ message TaskOOM {
message TaskExecAdded {
string container_id = 1;
string exec_id = 2;
}
message TaskExecStarted {
string container_id = 1;
string exec_id = 2;
uint32 pid = 3;
}

File diff suppressed because it is too large

View File

@ -4,6 +4,8 @@ package containerd.services.snapshots.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot";
@ -16,6 +18,7 @@ service Snapshots {
rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
rpc Usage(UsageRequest) returns (UsageResponse);
}
@ -24,6 +27,9 @@ message PrepareSnapshotRequest {
string snapshotter = 1;
string key = 2;
string parent = 3;
// Labels are arbitrary data on snapshots.
map<string, string> labels = 4;
}
message PrepareSnapshotResponse {
@ -34,6 +40,9 @@ message ViewSnapshotRequest {
string snapshotter = 1;
string key = 2;
string parent = 3;
// Labels are arbitrary data on snapshots.
map<string, string> labels = 4;
}
message ViewSnapshotResponse {
@ -58,6 +67,9 @@ message CommitSnapshotRequest {
string snapshotter = 1;
string name = 2;
string key = 3;
// Labels are arbitrary data on snapshots.
map<string, string> labels = 4;
}
message StatSnapshotRequest {
@ -79,12 +91,38 @@ message Info {
string name = 1;
string parent = 2;
Kind kind = 3;
// CreatedAt provides the time at which the snapshot was created.
google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt provides the time the info was last updated.
google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Labels are arbitrary data on snapshots.
map<string, string> labels = 6;
}
message StatSnapshotResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message UpdateSnapshotRequest {
string snapshotter = 1;
Info info = 2 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
//
// In info, Name, Parent, Kind, and Created are immutable;
// other fields may be updated using this mask.
// If no mask is provided, all mutable fields are updated.
google.protobuf.FieldMask update_mask = 3;
}
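A small sketch of how the new Update RPC and its mask could be used to relabel a snapshot without touching the immutable fields; the package alias, the FieldMask type location, and the label values are assumptions for illustration only:
package main
import (
	"context"
	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
	protobuftypes "github.com/gogo/protobuf/types"
)
// relabelSnapshot updates only the labels of an existing snapshot. The
// update mask keeps the immutable fields (Name, Parent, Kind, CreatedAt)
// out of the update.
func relabelSnapshot(ctx context.Context, client snapshotapi.SnapshotsClient, key string) error {
	_, err := client.Update(ctx, &snapshotapi.UpdateSnapshotRequest{
		Snapshotter: "overlayfs",
		Info: snapshotapi.Info{
			Name:   key,
			Labels: map[string]string{"foo": "bar"},
		},
		UpdateMask: &protobuftypes.FieldMask{Paths: []string{"labels"}},
	})
	return err
}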
message UpdateSnapshotResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListSnapshotsRequest{
string snapshotter = 1;
}

View File

@ -11,12 +11,13 @@
It has these top-level messages:
CreateTaskRequest
CreateTaskResponse
StartTaskRequest
StartRequest
StartResponse
DeleteTaskRequest
DeleteResponse
DeleteProcessRequest
GetTaskRequest
GetTaskResponse
GetRequest
GetResponse
ListTasksRequest
ListTasksResponse
KillRequest
@ -102,13 +103,22 @@ func (m *CreateTaskResponse) Reset() { *m = CreateTaskRespons
func (*CreateTaskResponse) ProtoMessage() {}
func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{1} }
type StartTaskRequest struct {
type StartRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
}
func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} }
func (*StartTaskRequest) ProtoMessage() {}
func (*StartTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{2} }
func (m *StartRequest) Reset() { *m = StartRequest{} }
func (*StartRequest) ProtoMessage() {}
func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{2} }
type StartResponse struct {
Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
}
func (m *StartResponse) Reset() { *m = StartResponse{} }
func (*StartResponse) ProtoMessage() {}
func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{3} }
type DeleteTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -116,7 +126,7 @@ type DeleteTaskRequest struct {
func (m *DeleteTaskRequest) Reset() { *m = DeleteTaskRequest{} }
func (*DeleteTaskRequest) ProtoMessage() {}
func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{3} }
func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{4} }
type DeleteResponse struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@ -127,7 +137,7 @@ type DeleteResponse struct {
func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{4} }
func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{5} }
type DeleteProcessRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -136,23 +146,24 @@ type DeleteProcessRequest struct {
func (m *DeleteProcessRequest) Reset() { *m = DeleteProcessRequest{} }
func (*DeleteProcessRequest) ProtoMessage() {}
func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{5} }
func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{6} }
type GetTaskRequest struct {
type GetRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
}
func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} }
func (*GetTaskRequest) ProtoMessage() {}
func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{6} }
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{7} }
type GetTaskResponse struct {
Task *containerd_v1_types.Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"`
type GetResponse struct {
Process *containerd_v1_types.Process `protobuf:"bytes,1,opt,name=process" json:"process,omitempty"`
}
func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} }
func (*GetTaskResponse) ProtoMessage() {}
func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{7} }
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{8} }
type ListTasksRequest struct {
Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
@ -160,15 +171,15 @@ type ListTasksRequest struct {
func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} }
func (*ListTasksRequest) ProtoMessage() {}
func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{8} }
func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{9} }
type ListTasksResponse struct {
Tasks []*containerd_v1_types.Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
Tasks []*containerd_v1_types.Process `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
}
func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} }
func (*ListTasksResponse) ProtoMessage() {}
func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{9} }
func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{10} }
type KillRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -179,7 +190,7 @@ type KillRequest struct {
func (m *KillRequest) Reset() { *m = KillRequest{} }
func (*KillRequest) ProtoMessage() {}
func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{10} }
func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{11} }
type ExecProcessRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -197,15 +208,14 @@ type ExecProcessRequest struct {
func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} }
func (*ExecProcessRequest) ProtoMessage() {}
func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{11} }
func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{12} }
type ExecProcessResponse struct {
Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
}
func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} }
func (*ExecProcessResponse) ProtoMessage() {}
func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{12} }
func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{13} }
type ResizePtyRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -216,7 +226,7 @@ type ResizePtyRequest struct {
func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} }
func (*ResizePtyRequest) ProtoMessage() {}
func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{13} }
func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{14} }
type CloseIORequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -226,7 +236,7 @@ type CloseIORequest struct {
func (m *CloseIORequest) Reset() { *m = CloseIORequest{} }
func (*CloseIORequest) ProtoMessage() {}
func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{14} }
func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{15} }
type PauseTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -234,7 +244,7 @@ type PauseTaskRequest struct {
func (m *PauseTaskRequest) Reset() { *m = PauseTaskRequest{} }
func (*PauseTaskRequest) ProtoMessage() {}
func (*PauseTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{15} }
func (*PauseTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{16} }
type ResumeTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -242,7 +252,7 @@ type ResumeTaskRequest struct {
func (m *ResumeTaskRequest) Reset() { *m = ResumeTaskRequest{} }
func (*ResumeTaskRequest) ProtoMessage() {}
func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{16} }
func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{17} }
type ListPidsRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -250,7 +260,7 @@ type ListPidsRequest struct {
func (m *ListPidsRequest) Reset() { *m = ListPidsRequest{} }
func (*ListPidsRequest) ProtoMessage() {}
func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{17} }
func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{18} }
type ListPidsResponse struct {
Pids []uint32 `protobuf:"varint,1,rep,packed,name=pids" json:"pids,omitempty"`
@ -258,7 +268,7 @@ type ListPidsResponse struct {
func (m *ListPidsResponse) Reset() { *m = ListPidsResponse{} }
func (*ListPidsResponse) ProtoMessage() {}
func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{18} }
func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{19} }
type CheckpointTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -268,7 +278,7 @@ type CheckpointTaskRequest struct {
func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} }
func (*CheckpointTaskRequest) ProtoMessage() {}
func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{19} }
func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} }
type CheckpointTaskResponse struct {
Descriptors []*containerd_types1.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"`
@ -276,7 +286,7 @@ type CheckpointTaskResponse struct {
func (m *CheckpointTaskResponse) Reset() { *m = CheckpointTaskResponse{} }
func (*CheckpointTaskResponse) ProtoMessage() {}
func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} }
func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{21} }
type UpdateTaskRequest struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@ -285,17 +295,18 @@ type UpdateTaskRequest struct {
func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} }
func (*UpdateTaskRequest) ProtoMessage() {}
func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{21} }
func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{22} }
func init() {
proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest")
proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse")
proto.RegisterType((*StartTaskRequest)(nil), "containerd.services.tasks.v1.StartTaskRequest")
proto.RegisterType((*StartRequest)(nil), "containerd.services.tasks.v1.StartRequest")
proto.RegisterType((*StartResponse)(nil), "containerd.services.tasks.v1.StartResponse")
proto.RegisterType((*DeleteTaskRequest)(nil), "containerd.services.tasks.v1.DeleteTaskRequest")
proto.RegisterType((*DeleteResponse)(nil), "containerd.services.tasks.v1.DeleteResponse")
proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.services.tasks.v1.DeleteProcessRequest")
proto.RegisterType((*GetTaskRequest)(nil), "containerd.services.tasks.v1.GetTaskRequest")
proto.RegisterType((*GetTaskResponse)(nil), "containerd.services.tasks.v1.GetTaskResponse")
proto.RegisterType((*GetRequest)(nil), "containerd.services.tasks.v1.GetRequest")
proto.RegisterType((*GetResponse)(nil), "containerd.services.tasks.v1.GetResponse")
proto.RegisterType((*ListTasksRequest)(nil), "containerd.services.tasks.v1.ListTasksRequest")
proto.RegisterType((*ListTasksResponse)(nil), "containerd.services.tasks.v1.ListTasksResponse")
proto.RegisterType((*KillRequest)(nil), "containerd.services.tasks.v1.KillRequest")
@ -325,16 +336,16 @@ const _ = grpc.SupportPackageIsVersion4
type TasksClient interface {
// Create a task.
Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error)
// Start a task.
Start(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
// Start a process.
Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
// Delete a task and on disk state.
Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
Get(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error)
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error)
// Kill a task or process.
Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*ExecProcessResponse, error)
Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
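With this interface change the pid of a started process is reported by Start (via StartResponse) instead of by Exec. A minimal sketch of driving the regenerated client directly; the import path, the conn variable, and containerID are placeholders assumed for illustration, not part of this patch:

package example

import (
	"context"

	tasks "github.com/containerd/containerd/api/services/tasks/v1"
	"google.golang.org/grpc"
)

// startTask starts the task for containerID and returns the pid now carried
// in StartResponse.
func startTask(ctx context.Context, conn *grpc.ClientConn, containerID string) (uint32, error) {
	client := tasks.NewTasksClient(conn)
	resp, err := client.Start(ctx, &tasks.StartRequest{ContainerID: containerID})
	if err != nil {
		return 0, err
	}
	return resp.Pid, nil
}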
@ -361,8 +372,8 @@ func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ..
return out, nil
}
func (c *tasksClient) Start(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
out := new(google_protobuf.Empty)
func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
out := new(StartResponse)
err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, c.cc, opts...)
if err != nil {
return nil, err
@ -388,8 +399,8 @@ func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessReques
return out, nil
}
func (c *tasksClient) Get(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) {
out := new(GetTaskResponse)
func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
out := new(GetResponse)
err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, c.cc, opts...)
if err != nil {
return nil, err
@ -415,8 +426,8 @@ func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.Ca
return out, nil
}
func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*ExecProcessResponse, error) {
out := new(ExecProcessResponse)
func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
out := new(google_protobuf.Empty)
err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, c.cc, opts...)
if err != nil {
return nil, err
@ -492,16 +503,16 @@ func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ..
type TasksServer interface {
// Create a task.
Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
// Start a task.
Start(context.Context, *StartTaskRequest) (*google_protobuf.Empty, error)
// Start a process.
Start(context.Context, *StartRequest) (*StartResponse, error)
// Delete a task and on disk state.
Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error)
DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
Get(context.Context, *GetTaskRequest) (*GetTaskResponse, error)
Get(context.Context, *GetRequest) (*GetResponse, error)
List(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
// Kill a task or process.
Kill(context.Context, *KillRequest) (*google_protobuf.Empty, error)
Exec(context.Context, *ExecProcessRequest) (*ExecProcessResponse, error)
Exec(context.Context, *ExecProcessRequest) (*google_protobuf.Empty, error)
ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf.Empty, error)
CloseIO(context.Context, *CloseIORequest) (*google_protobuf.Empty, error)
Pause(context.Context, *PauseTaskRequest) (*google_protobuf.Empty, error)
@ -534,7 +545,7 @@ func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interf
}
func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartTaskRequest)
in := new(StartRequest)
if err := dec(in); err != nil {
return nil, err
}
@ -546,7 +557,7 @@ func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interfa
FullMethod: "/containerd.services.tasks.v1.Tasks/Start",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TasksServer).Start(ctx, req.(*StartTaskRequest))
return srv.(TasksServer).Start(ctx, req.(*StartRequest))
}
return interceptor(ctx, in, info, handler)
}
@ -588,7 +599,7 @@ func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func
}
func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetTaskRequest)
in := new(GetRequest)
if err := dec(in); err != nil {
return nil, err
}
@ -600,7 +611,7 @@ func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface
FullMethod: "/containerd.services.tasks.v1.Tasks/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TasksServer).Get(ctx, req.(*GetTaskRequest))
return srv.(TasksServer).Get(ctx, req.(*GetRequest))
}
return interceptor(ctx, in, info, handler)
}
@ -967,7 +978,7 @@ func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *StartTaskRequest) Marshal() (dAtA []byte, err error) {
func (m *StartRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
@ -977,7 +988,7 @@ func (m *StartTaskRequest) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *StartTaskRequest) MarshalTo(dAtA []byte) (int, error) {
func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
@ -988,6 +999,35 @@ func (m *StartTaskRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
i += copy(dAtA[i:], m.ContainerID)
}
if len(m.ExecID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
i += copy(dAtA[i:], m.ExecID)
}
return i, nil
}
func (m *StartResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Pid != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
}
return i, nil
}
@ -1087,7 +1127,7 @@ func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) {
func (m *GetRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
@ -1097,7 +1137,7 @@ func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) {
func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
@ -1108,10 +1148,16 @@ func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
i += copy(dAtA[i:], m.ContainerID)
}
if len(m.ExecID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
i += copy(dAtA[i:], m.ExecID)
}
return i, nil
}
func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) {
func (m *GetResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
@ -1121,16 +1167,16 @@ func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *GetTaskResponse) MarshalTo(dAtA []byte) (int, error) {
func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Task != nil {
if m.Process != nil {
dAtA[i] = 0xa
i++
i = encodeVarintTasks(dAtA, i, uint64(m.Task.Size()))
n4, err := m.Task.MarshalTo(dAtA[i:])
i = encodeVarintTasks(dAtA, i, uint64(m.Process.Size()))
n4, err := m.Process.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
@ -1321,11 +1367,6 @@ func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.Pid != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
}
return i, nil
}
@ -1699,13 +1740,26 @@ func (m *CreateTaskResponse) Size() (n int) {
return n
}
func (m *StartTaskRequest) Size() (n int) {
func (m *StartRequest) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTasks(uint64(l))
}
l = len(m.ExecID)
if l > 0 {
n += 1 + l + sovTasks(uint64(l))
}
return n
}
func (m *StartResponse) Size() (n int) {
var l int
_ = l
if m.Pid != 0 {
n += 1 + sovTasks(uint64(m.Pid))
}
return n
}
@ -1751,21 +1805,25 @@ func (m *DeleteProcessRequest) Size() (n int) {
return n
}
func (m *GetTaskRequest) Size() (n int) {
func (m *GetRequest) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTasks(uint64(l))
}
l = len(m.ExecID)
if l > 0 {
n += 1 + l + sovTasks(uint64(l))
}
return n
}
func (m *GetTaskResponse) Size() (n int) {
func (m *GetResponse) Size() (n int) {
var l int
_ = l
if m.Task != nil {
l = m.Task.Size()
if m.Process != nil {
l = m.Process.Size()
n += 1 + l + sovTasks(uint64(l))
}
return n
@ -1849,9 +1907,6 @@ func (m *ExecProcessRequest) Size() (n int) {
func (m *ExecProcessResponse) Size() (n int) {
var l int
_ = l
if m.Pid != 0 {
n += 1 + sovTasks(uint64(m.Pid))
}
return n
}
@ -2020,12 +2075,23 @@ func (this *CreateTaskResponse) String() string {
}, "")
return s
}
func (this *StartTaskRequest) String() string {
func (this *StartRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&StartTaskRequest{`,
s := strings.Join([]string{`&StartRequest{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
`}`,
}, "")
return s
}
func (this *StartResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&StartResponse{`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`}`,
}, "")
return s
@ -2064,22 +2130,23 @@ func (this *DeleteProcessRequest) String() string {
}, "")
return s
}
func (this *GetTaskRequest) String() string {
func (this *GetRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&GetTaskRequest{`,
s := strings.Join([]string{`&GetRequest{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
`}`,
}, "")
return s
}
func (this *GetTaskResponse) String() string {
func (this *GetResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&GetTaskResponse{`,
`Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "containerd_v1_types.Task", 1) + `,`,
s := strings.Join([]string{`&GetResponse{`,
`Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "containerd_v1_types.Process", 1) + `,`,
`}`,
}, "")
return s
@ -2099,7 +2166,7 @@ func (this *ListTasksResponse) String() string {
return "nil"
}
s := strings.Join([]string{`&ListTasksResponse{`,
`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "containerd_v1_types.Task", 1) + `,`,
`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "containerd_v1_types.Process", 1) + `,`,
`}`,
}, "")
return s
@ -2138,7 +2205,6 @@ func (this *ExecProcessResponse) String() string {
return "nil"
}
s := strings.Join([]string{`&ExecProcessResponse{`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`}`,
}, "")
return s
@ -2630,7 +2696,7 @@ func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *StartTaskRequest) Unmarshal(dAtA []byte) error {
func (m *StartRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -2653,10 +2719,10 @@ func (m *StartTaskRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StartTaskRequest: wiretype end group for non-group")
return fmt.Errorf("proto: StartRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StartTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@ -2688,6 +2754,104 @@ func (m *StartTaskRequest) Unmarshal(dAtA []byte) error {
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTasks
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTasks
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExecID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTasks(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTasks
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *StartResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTasks
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StartResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
}
m.Pid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTasks
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Pid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTasks(dAtA[iNdEx:])
@ -3043,7 +3207,7 @@ func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *GetTaskRequest) Unmarshal(dAtA []byte) error {
func (m *GetRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -3066,10 +3230,10 @@ func (m *GetTaskRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetTaskRequest: wiretype end group for non-group")
return fmt.Errorf("proto: GetRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@ -3101,6 +3265,35 @@ func (m *GetTaskRequest) Unmarshal(dAtA []byte) error {
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTasks
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTasks
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExecID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTasks(dAtA[iNdEx:])
@ -3122,7 +3315,7 @@ func (m *GetTaskRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *GetTaskResponse) Unmarshal(dAtA []byte) error {
func (m *GetResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -3145,15 +3338,15 @@ func (m *GetTaskResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group")
return fmt.Errorf("proto: GetResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -3177,10 +3370,10 @@ func (m *GetTaskResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Task == nil {
m.Task = &containerd_v1_types.Task{}
if m.Process == nil {
m.Process = &containerd_v1_types.Process{}
}
if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@ -3339,7 +3532,7 @@ func (m *ListTasksResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Tasks = append(m.Tasks, &containerd_v1_types.Task{})
m.Tasks = append(m.Tasks, &containerd_v1_types.Process{})
if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@ -3789,25 +3982,6 @@ func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
}
m.Pid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTasks
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Pid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTasks(dAtA[iNdEx:])
@ -4896,80 +5070,81 @@ func init() {
}
var fileDescriptorTasks = []byte{
// 1200 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x6f, 0x1b, 0xc5,
0x1b, 0xef, 0xfa, 0x65, 0x63, 0x3f, 0xfe, 0xa7, 0x49, 0xf6, 0x1f, 0x82, 0xbb, 0x54, 0x76, 0xb4,
0x48, 0x60, 0x2a, 0xb2, 0x4b, 0x5c, 0xc4, 0x81, 0x56, 0x88, 0x24, 0x0e, 0x91, 0x05, 0x28, 0xe9,
0xa6, 0x20, 0xd4, 0x8b, 0xb5, 0xf1, 0x4e, 0x9c, 0x51, 0xec, 0x9d, 0xed, 0xce, 0x38, 0x4d, 0xe0,
0x00, 0x1f, 0xa1, 0x57, 0x2e, 0x5c, 0xf9, 0x2a, 0x39, 0x72, 0x44, 0x08, 0x05, 0xea, 0x6f, 0xc1,
0x0d, 0xcd, 0x8b, 0x37, 0x1b, 0x3b, 0x7e, 0x49, 0x9d, 0x72, 0x49, 0x66, 0x66, 0x9f, 0xdf, 0x33,
0xcf, 0xdb, 0xfc, 0x9e, 0x47, 0x86, 0xcd, 0x16, 0x66, 0x47, 0xdd, 0x03, 0xbb, 0x49, 0x3a, 0x4e,
0x93, 0x04, 0xcc, 0xc3, 0x01, 0x8a, 0xfc, 0xe4, 0xd2, 0x0b, 0xb1, 0x43, 0x51, 0x74, 0x82, 0x9b,
0x88, 0x3a, 0xcc, 0xa3, 0xc7, 0xd4, 0x39, 0x59, 0x97, 0x0b, 0x3b, 0x8c, 0x08, 0x23, 0xc6, 0xfd,
0x4b, 0x69, 0xbb, 0x2f, 0x69, 0x4b, 0x81, 0x93, 0x75, 0xf3, 0x9d, 0x16, 0x21, 0xad, 0x36, 0x72,
0x84, 0xec, 0x41, 0xf7, 0xd0, 0x41, 0x9d, 0x90, 0x9d, 0x49, 0xa8, 0x79, 0x6f, 0xf0, 0xa3, 0x17,
0xf4, 0x3f, 0x2d, 0xb7, 0x48, 0x8b, 0x88, 0xa5, 0xc3, 0x57, 0xea, 0xf4, 0x93, 0xa9, 0xec, 0x65,
0x67, 0x21, 0xa2, 0x4e, 0x87, 0x74, 0x03, 0xa6, 0x70, 0x8f, 0x6e, 0x80, 0xf3, 0x11, 0x6d, 0x46,
0x38, 0x64, 0x24, 0x52, 0xe0, 0x4f, 0x6f, 0x00, 0xe6, 0x7e, 0x8b, 0x3f, 0x0a, 0x5b, 0x1e, 0xf4,
0x90, 0xe1, 0x0e, 0xa2, 0xcc, 0xeb, 0x84, 0x52, 0xc0, 0x3a, 0x4f, 0xc1, 0xd2, 0x56, 0x84, 0x3c,
0x86, 0x9e, 0x7a, 0xf4, 0xd8, 0x45, 0xcf, 0xbb, 0x88, 0x32, 0xa3, 0x0a, 0xff, 0x8b, 0xd5, 0x37,
0xb0, 0x5f, 0xd4, 0x56, 0xb5, 0x4a, 0x7e, 0x73, 0xa1, 0x77, 0x51, 0x2e, 0x6c, 0xf5, 0xcf, 0xeb,
0x35, 0xb7, 0x10, 0x0b, 0xd5, 0x7d, 0xc3, 0x01, 0x3d, 0x22, 0x84, 0x1d, 0xd2, 0x62, 0x7a, 0x35,
0x5d, 0x29, 0x54, 0xdf, 0xb6, 0x13, 0x89, 0x11, 0xd6, 0xd9, 0x5f, 0xf3, 0x90, 0xb8, 0x4a, 0xcc,
0x58, 0x86, 0x2c, 0x65, 0x3e, 0x0e, 0x8a, 0x19, 0xae, 0xdd, 0x95, 0x1b, 0x63, 0x05, 0x74, 0xca,
0x7c, 0xd2, 0x65, 0xc5, 0xac, 0x38, 0x56, 0x3b, 0x75, 0x8e, 0xa2, 0xa8, 0xa8, 0xc7, 0xe7, 0x28,
0x8a, 0x0c, 0x13, 0x72, 0x0c, 0x45, 0x1d, 0x1c, 0x78, 0xed, 0xe2, 0xdc, 0xaa, 0x56, 0xc9, 0xb9,
0xf1, 0xde, 0x78, 0x0c, 0xd0, 0x3c, 0x42, 0xcd, 0xe3, 0x90, 0xe0, 0x80, 0x15, 0x73, 0xab, 0x5a,
0xa5, 0x50, 0xbd, 0x3f, 0x6c, 0x56, 0x2d, 0x8e, 0xb8, 0x9b, 0x90, 0x37, 0x6c, 0x98, 0x23, 0x21,
0xc3, 0x24, 0xa0, 0xc5, 0xbc, 0x80, 0x2e, 0xdb, 0x32, 0x9a, 0x76, 0x3f, 0x9a, 0xf6, 0x46, 0x70,
0xe6, 0xf6, 0x85, 0xac, 0x67, 0x60, 0x24, 0x23, 0x49, 0x43, 0x12, 0x50, 0xf4, 0x5a, 0xa1, 0x5c,
0x84, 0x74, 0x88, 0xfd, 0x62, 0x6a, 0x55, 0xab, 0xcc, 0xbb, 0x7c, 0x69, 0x7d, 0x01, 0x8b, 0xfb,
0xcc, 0x8b, 0xd8, 0x8c, 0x49, 0xb2, 0x76, 0x60, 0xa9, 0x86, 0xda, 0x68, 0xe6, 0x6c, 0x5b, 0xbf,
0x68, 0x70, 0x57, 0x6a, 0x8a, 0x3d, 0x5d, 0x81, 0x54, 0x0c, 0xd6, 0x7b, 0x17, 0xe5, 0x54, 0xbd,
0xe6, 0xa6, 0xf0, 0x35, 0xde, 0x18, 0x65, 0x28, 0xa0, 0x53, 0xcc, 0x1a, 0x94, 0x79, 0xac, 0xcb,
0xeb, 0x85, 0x7f, 0x01, 0x7e, 0xb4, 0x2f, 0x4e, 0x8c, 0x0d, 0xc8, 0xf3, 0x1d, 0xf2, 0x1b, 0x1e,
0x13, 0xe5, 0x51, 0xa8, 0x9a, 0x43, 0xc1, 0x7f, 0xda, 0x2f, 0xe5, 0xcd, 0xdc, 0xf9, 0x45, 0xf9,
0xce, 0xcb, 0xbf, 0xca, 0x9a, 0x9b, 0x93, 0xb0, 0x0d, 0x66, 0x11, 0x58, 0x96, 0xf6, 0xed, 0x45,
0xa4, 0x89, 0x28, 0x9d, 0xa5, 0xb4, 0xdf, 0x85, 0x39, 0x74, 0x8a, 0x9a, 0x0d, 0xe5, 0x45, 0x7e,
0x13, 0x7a, 0x17, 0x65, 0x7d, 0xfb, 0x14, 0x35, 0xeb, 0x35, 0x57, 0xe7, 0x9f, 0xea, 0xbe, 0x55,
0x83, 0xbb, 0x3b, 0x68, 0xe6, 0x04, 0x7d, 0x0e, 0x0b, 0xb1, 0x16, 0x15, 0xd7, 0x35, 0xc8, 0xf0,
0x17, 0x2d, 0xe0, 0x85, 0xea, 0xbd, 0x64, 0xfd, 0x9e, 0xac, 0xab, 0x12, 0x16, 0x00, 0x21, 0x66,
0x3d, 0x80, 0xc5, 0xaf, 0x30, 0x15, 0x2a, 0x62, 0xa7, 0x57, 0x40, 0x3f, 0xc4, 0x6d, 0x86, 0x22,
0x69, 0x83, 0xab, 0x76, 0x56, 0x0d, 0x96, 0x12, 0xb2, 0xea, 0x3e, 0x07, 0xb2, 0x82, 0x3e, 0x8b,
0x9a, 0x78, 0xc7, 0x63, 0x2e, 0x94, 0x72, 0xd6, 0x4b, 0x0d, 0x0a, 0x5f, 0xe2, 0x76, 0xfb, 0x4d,
0x87, 0x58, 0x70, 0x00, 0x6e, 0xf1, 0x97, 0x2e, 0x4b, 0x46, 0xed, 0x78, 0x85, 0x79, 0xed, 0xb6,
0x28, 0x94, 0x9c, 0xcb, 0x97, 0xd6, 0x3f, 0x1a, 0x18, 0x1c, 0x7c, 0x0b, 0xc9, 0x8f, 0x69, 0x2a,
0x75, 0x3d, 0x4d, 0xa5, 0x47, 0xd0, 0x54, 0x66, 0x24, 0x4d, 0x65, 0x07, 0x68, 0xaa, 0x02, 0x19,
0x1a, 0xa2, 0xa6, 0x20, 0xb6, 0x51, 0x2c, 0x23, 0x24, 0x92, 0x51, 0x9a, 0x1b, 0x59, 0x88, 0xef,
0xc3, 0xff, 0xaf, 0xb8, 0xae, 0xd2, 0xaa, 0x9e, 0xa1, 0x76, 0x49, 0x2a, 0x3f, 0x6b, 0xb0, 0xe8,
0x22, 0x8a, 0xbf, 0x47, 0x7b, 0xec, 0xec, 0x8d, 0x27, 0x6f, 0x19, 0xb2, 0x2f, 0xb0, 0xcf, 0x8e,
0x54, 0xee, 0xe4, 0x86, 0xc7, 0xeb, 0x08, 0xe1, 0xd6, 0x91, 0x7c, 0xe6, 0xf3, 0xae, 0xda, 0x59,
0x3f, 0xc2, 0xdd, 0xad, 0x36, 0xa1, 0xa8, 0xbe, 0xfb, 0x5f, 0x18, 0x26, 0x13, 0x9c, 0x16, 0x79,
0x91, 0x1b, 0xce, 0xb8, 0x7b, 0x5e, 0x97, 0xa2, 0x5b, 0x60, 0x5c, 0x17, 0xd1, 0x6e, 0x67, 0x66,
0x45, 0xdb, 0xb0, 0xc0, 0xdf, 0xea, 0x1e, 0xf6, 0x67, 0x29, 0x67, 0xeb, 0x3d, 0x49, 0x0f, 0x52,
0x8d, 0x2a, 0x0d, 0x03, 0x32, 0x21, 0xf6, 0xe5, 0x83, 0x9f, 0x77, 0xc5, 0xda, 0xfa, 0x53, 0x83,
0xb7, 0xb6, 0xe2, 0x66, 0x38, 0xeb, 0x70, 0xd0, 0x80, 0xa5, 0xd0, 0x8b, 0x50, 0xc0, 0x1a, 0x89,
0x86, 0x2c, 0x53, 0x52, 0xe5, 0xe4, 0xfd, 0xc7, 0x45, 0xf9, 0x41, 0x62, 0xcc, 0x21, 0x21, 0x0a,
0x62, 0x38, 0x75, 0x5a, 0x64, 0xcd, 0xc7, 0x2d, 0x44, 0x99, 0x5d, 0x13, 0xff, 0xdc, 0x45, 0xa9,
0x6c, 0xeb, 0xda, 0x66, 0x9d, 0x9e, 0xa6, 0x59, 0x7f, 0x07, 0x2b, 0x83, 0xde, 0xa9, 0x60, 0x7c,
0x06, 0x85, 0xcb, 0x11, 0xac, 0x4f, 0x82, 0xe3, 0xa7, 0x86, 0x24, 0xc0, 0xfa, 0x01, 0x96, 0xbe,
0x09, 0xfd, 0x5b, 0x18, 0xa8, 0xaa, 0x90, 0x8f, 0x10, 0x25, 0xdd, 0xa8, 0x89, 0xa8, 0x88, 0xd5,
0x28, 0xa7, 0x2e, 0xc5, 0xaa, 0xbf, 0x16, 0x20, 0x2b, 0xd8, 0xdc, 0x38, 0x06, 0x5d, 0x4e, 0x23,
0x86, 0x63, 0x8f, 0x9b, 0x90, 0xed, 0xa1, 0xe9, 0xcf, 0xfc, 0x68, 0x7a, 0x80, 0x8a, 0xd9, 0x2e,
0x64, 0xc5, 0x78, 0x62, 0xd8, 0xe3, 0xa1, 0x83, 0x33, 0x8c, 0xb9, 0x32, 0xe4, 0xd0, 0x36, 0x9f,
0xcf, 0x8d, 0x16, 0xe8, 0xb2, 0x7b, 0x4f, 0xb2, 0x7e, 0x68, 0x9a, 0x31, 0x3f, 0x9c, 0x06, 0x10,
0x5b, 0xfe, 0x1c, 0xe6, 0xaf, 0x8c, 0x09, 0x46, 0x75, 0x1a, 0xf8, 0xd5, 0xb6, 0x72, 0xc3, 0x2b,
0x0f, 0x20, 0xbd, 0x83, 0x98, 0x31, 0x01, 0x74, 0x75, 0x96, 0x30, 0xd7, 0xa6, 0x94, 0x56, 0x77,
0xb4, 0x20, 0xc3, 0x5f, 0xf9, 0xa4, 0x7c, 0x0c, 0x0e, 0x0a, 0xa6, 0x33, 0xb5, 0xbc, 0xba, 0xa8,
0x0e, 0x19, 0xde, 0xfa, 0x8d, 0x0f, 0xc6, 0x03, 0x13, 0xe3, 0xc1, 0xc8, 0x9c, 0x1f, 0x43, 0x86,
0x33, 0xb3, 0x31, 0xa1, 0xfc, 0x86, 0xdb, 0xba, 0xb9, 0x7e, 0x03, 0x84, 0xb2, 0x7b, 0x1f, 0xf2,
0x71, 0xeb, 0x9b, 0x14, 0xa5, 0xc1, 0x1e, 0x39, 0xd2, 0x83, 0x5d, 0x98, 0x53, 0x4d, 0x6b, 0x52,
0x76, 0xaf, 0xf6, 0xb6, 0x31, 0x0a, 0xb3, 0xa2, 0x09, 0x4d, 0xb2, 0x70, 0xb0, 0x53, 0x8d, 0x54,
0xf8, 0x04, 0x74, 0xd9, 0x8d, 0x26, 0xbd, 0xab, 0xa1, 0x9e, 0x35, 0x52, 0x25, 0x86, 0x5c, 0xbf,
0xa1, 0x18, 0x6b, 0x93, 0xcb, 0x27, 0xd1, 0xbf, 0x4c, 0x7b, 0x5a, 0x71, 0x95, 0xb4, 0x17, 0x00,
0x09, 0xca, 0x7f, 0x38, 0x21, 0xc4, 0xd7, 0x35, 0x2f, 0xf3, 0xe3, 0x9b, 0x81, 0xd4, 0xc5, 0x4f,
0x40, 0x97, 0x9c, 0x3e, 0x29, 0x6c, 0x43, 0xcc, 0x3f, 0x2a, 0x6c, 0x9b, 0xdf, 0x9e, 0xbf, 0x2a,
0xdd, 0xf9, 0xfd, 0x55, 0xe9, 0xce, 0x4f, 0xbd, 0x92, 0x76, 0xde, 0x2b, 0x69, 0xbf, 0xf5, 0x4a,
0xda, 0xdf, 0xbd, 0x92, 0xf6, 0xec, 0xf1, 0xeb, 0xfd, 0x28, 0xf2, 0x48, 0x2c, 0x0e, 0x74, 0x71,
0xcf, 0xc3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xc3, 0x21, 0xef, 0x5b, 0x11, 0x00, 0x00,
// 1207 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x45,
0x14, 0xef, 0xfa, 0x63, 0x6d, 0x3f, 0x37, 0x6d, 0x32, 0xa4, 0xc1, 0x2c, 0x95, 0x1d, 0x16, 0x09,
0x99, 0x42, 0x77, 0xa9, 0x8b, 0x7a, 0xa0, 0x15, 0x52, 0x13, 0x87, 0xc8, 0x02, 0xd4, 0x74, 0x53,
0x10, 0xca, 0xc5, 0x6c, 0xbc, 0x13, 0x67, 0x14, 0x7b, 0x77, 0xbb, 0x33, 0x4e, 0x13, 0x38, 0xc0,
0x9f, 0xd0, 0x0b, 0x07, 0x2e, 0xfc, 0x3d, 0x39, 0x72, 0x44, 0x08, 0x05, 0xea, 0xff, 0x82, 0x1b,
0x9a, 0x0f, 0x6f, 0x36, 0x76, 0xfc, 0x91, 0xba, 0xe1, 0x92, 0xcc, 0xcc, 0xbe, 0xaf, 0xf9, 0xbd,
0x37, 0xbf, 0xf7, 0x12, 0x58, 0x6b, 0x13, 0xb6, 0xdf, 0xdb, 0xb5, 0x5a, 0x41, 0xd7, 0x6e, 0x05,
0x3e, 0x73, 0x89, 0x8f, 0x23, 0x2f, 0xb9, 0x74, 0x43, 0x62, 0x53, 0x1c, 0x1d, 0x92, 0x16, 0xa6,
0x36, 0x73, 0xe9, 0x01, 0xb5, 0x0f, 0xef, 0xc9, 0x85, 0x15, 0x46, 0x01, 0x0b, 0xd0, 0xed, 0x33,
0x69, 0x6b, 0x20, 0x69, 0x49, 0x81, 0xc3, 0x7b, 0xc6, 0xbb, 0xed, 0x20, 0x68, 0x77, 0xb0, 0x2d,
0x64, 0x77, 0x7b, 0x7b, 0x36, 0xee, 0x86, 0xec, 0x58, 0xaa, 0x1a, 0xef, 0x0c, 0x7f, 0x74, 0xfd,
0xc1, 0xa7, 0xe5, 0x76, 0xd0, 0x0e, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0xfa, 0x60, 0xa6, 0x78, 0xd9,
0x71, 0x88, 0xa9, 0xdd, 0x0d, 0x7a, 0x3e, 0x53, 0x7a, 0x0f, 0x2f, 0xa1, 0xe7, 0x61, 0xda, 0x8a,
0x48, 0xc8, 0x82, 0x48, 0x29, 0x7f, 0x76, 0x09, 0x65, 0x7e, 0x6f, 0xf1, 0x43, 0xe9, 0x56, 0x86,
0x6f, 0xc8, 0x48, 0x17, 0x53, 0xe6, 0x76, 0x43, 0x29, 0x60, 0x9e, 0xa4, 0x60, 0x69, 0x3d, 0xc2,
0x2e, 0xc3, 0xcf, 0x5c, 0x7a, 0xe0, 0xe0, 0xe7, 0x3d, 0x4c, 0x19, 0xaa, 0xc1, 0xf5, 0xd8, 0x7c,
0x93, 0x78, 0x25, 0x6d, 0x55, 0xab, 0x16, 0xd6, 0x6e, 0xf6, 0x4f, 0x2b, 0xc5, 0xf5, 0xc1, 0x79,
0xa3, 0xee, 0x14, 0x63, 0xa1, 0x86, 0x87, 0x6c, 0xd0, 0xa3, 0x20, 0x60, 0x7b, 0xb4, 0x94, 0x5e,
0x4d, 0x57, 0x8b, 0xb5, 0xb7, 0xad, 0x44, 0x62, 0x44, 0x74, 0xd6, 0xd7, 0x1c, 0x12, 0x47, 0x89,
0xa1, 0x65, 0xc8, 0x52, 0xe6, 0x11, 0xbf, 0x94, 0xe1, 0xd6, 0x1d, 0xb9, 0x41, 0x2b, 0xa0, 0x53,
0xe6, 0x05, 0x3d, 0x56, 0xca, 0x8a, 0x63, 0xb5, 0x53, 0xe7, 0x38, 0x8a, 0x4a, 0x7a, 0x7c, 0x8e,
0xa3, 0x08, 0x19, 0x90, 0x67, 0x38, 0xea, 0x12, 0xdf, 0xed, 0x94, 0x72, 0xab, 0x5a, 0x35, 0xef,
0xc4, 0x7b, 0xf4, 0x08, 0xa0, 0xb5, 0x8f, 0x5b, 0x07, 0x61, 0x40, 0x7c, 0x56, 0xca, 0xaf, 0x6a,
0xd5, 0x62, 0xed, 0xf6, 0x68, 0x58, 0xf5, 0x18, 0x71, 0x27, 0x21, 0x8f, 0x2c, 0xc8, 0x05, 0x21,
0x23, 0x81, 0x4f, 0x4b, 0x05, 0xa1, 0xba, 0x6c, 0x49, 0x34, 0xad, 0x01, 0x9a, 0xd6, 0x63, 0xff,
0xd8, 0x19, 0x08, 0x99, 0x3b, 0x80, 0x92, 0x48, 0xd2, 0x30, 0xf0, 0x29, 0x7e, 0x2d, 0x28, 0x17,
0x21, 0x1d, 0x12, 0xaf, 0x94, 0x5a, 0xd5, 0xaa, 0x0b, 0x0e, 0x5f, 0x9a, 0x6d, 0xb8, 0xbe, 0xcd,
0xdc, 0x88, 0xcd, 0x93, 0xa0, 0xf7, 0x21, 0x87, 0x8f, 0x70, 0xab, 0xa9, 0x2c, 0x17, 0xd6, 0xa0,
0x7f, 0x5a, 0xd1, 0x37, 0x8e, 0x70, 0xab, 0x51, 0x77, 0x74, 0xfe, 0xa9, 0xe1, 0x99, 0xef, 0xc1,
0x82, 0x72, 0xa4, 0xe2, 0x57, 0xb1, 0x68, 0x67, 0xb1, 0x6c, 0xc2, 0x52, 0x1d, 0x77, 0xf0, 0xdc,
0x15, 0x63, 0xfe, 0xa6, 0xc1, 0x0d, 0x69, 0x29, 0xf6, 0xb6, 0x02, 0xa9, 0x58, 0x59, 0xef, 0x9f,
0x56, 0x52, 0x8d, 0xba, 0x93, 0x22, 0x17, 0x20, 0x82, 0x2a, 0x50, 0xc4, 0x47, 0x84, 0x35, 0x29,
0x73, 0x59, 0x8f, 0xd7, 0x1c, 0xff, 0x02, 0xfc, 0x68, 0x5b, 0x9c, 0xa0, 0xc7, 0x50, 0xe0, 0x3b,
0xec, 0x35, 0x5d, 0x26, 0x4a, 0xac, 0x58, 0x33, 0x46, 0x12, 0xf8, 0x6c, 0xf0, 0x1c, 0xd6, 0xf2,
0x27, 0xa7, 0x95, 0x6b, 0x2f, 0xff, 0xae, 0x68, 0x4e, 0x5e, 0xaa, 0x3d, 0x66, 0x66, 0x00, 0xcb,
0x32, 0xbe, 0xad, 0x28, 0x68, 0x61, 0x4a, 0xaf, 0x1c, 0x7d, 0x0c, 0xb0, 0x89, 0xaf, 0x3e, 0xc9,
0x1b, 0x50, 0x14, 0x6e, 0x14, 0xe8, 0x0f, 0x20, 0x17, 0xca, 0x0b, 0x0a, 0x17, 0x43, 0x6f, 0xe4,
0xf0, 0x9e, 0x7a, 0x26, 0x03, 0x10, 0x06, 0xc2, 0xe6, 0x1d, 0x58, 0xfc, 0x8a, 0x50, 0xc6, 0xcb,
0x20, 0x86, 0x66, 0x05, 0xf4, 0x3d, 0xd2, 0x61, 0x38, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x8b, 0x26,
0x21, 0x1b, 0xbf, 0x8d, 0xac, 0x20, 0xea, 0x92, 0x26, 0x18, 0x63, 0xb2, 0x5b, 0x29, 0x6a, 0xbe,
0xd4, 0xa0, 0xf8, 0x25, 0xe9, 0x74, 0xae, 0x1a, 0x24, 0x41, 0x38, 0xa4, 0xcd, 0x69, 0x45, 0xd6,
0x96, 0xda, 0xf1, 0x52, 0x74, 0x3b, 0x1d, 0x51, 0x51, 0x79, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0x40,
0x5c, 0xf9, 0x0d, 0x54, 0x49, 0xcc, 0x89, 0xa9, 0x8b, 0x39, 0x31, 0x3d, 0x86, 0x13, 0x33, 0x63,
0x39, 0x31, 0x3b, 0xc4, 0x89, 0x55, 0xc8, 0xd0, 0x10, 0xb7, 0x04, 0x8b, 0x8e, 0xa3, 0x34, 0x21,
0x91, 0x44, 0x29, 0x37, 0xb6, 0x94, 0x6e, 0xc1, 0x5b, 0xe7, 0xae, 0x2e, 0x33, 0x6b, 0xfe, 0xaa,
0xc1, 0xa2, 0x83, 0x29, 0xf9, 0x01, 0x6f, 0xb1, 0xe3, 0x2b, 0x4f, 0xd5, 0x32, 0x64, 0x5f, 0x10,
0x8f, 0xed, 0xab, 0x4c, 0xc9, 0x0d, 0x47, 0x67, 0x1f, 0x93, 0xf6, 0xbe, 0x7c, 0xfd, 0x0b, 0x8e,
0xda, 0x99, 0x3f, 0xc1, 0x8d, 0xf5, 0x4e, 0x40, 0x71, 0xe3, 0xc9, 0xff, 0x11, 0x98, 0x4c, 0x67,
0x5a, 0x64, 0x41, 0x6e, 0xcc, 0x2f, 0x60, 0x71, 0xcb, 0xed, 0xd1, 0xb9, 0xf9, 0x73, 0x13, 0x96,
0x1c, 0x4c, 0x7b, 0xdd, 0xb9, 0x0d, 0x6d, 0xc0, 0x4d, 0xfe, 0x38, 0xb7, 0x88, 0x37, 0x4f, 0xf1,
0x9a, 0x1f, 0x48, 0x3e, 0x90, 0x66, 0xd4, 0x13, 0x47, 0x90, 0x09, 0x89, 0x27, 0x5f, 0xf8, 0x82,
0x23, 0xd6, 0xe6, 0x5f, 0x1a, 0xdc, 0x5a, 0x8f, 0xfb, 0xec, 0xbc, 0x73, 0x47, 0x13, 0x96, 0x42,
0x37, 0xc2, 0x3e, 0x6b, 0x26, 0x7a, 0xbd, 0x4c, 0x49, 0x8d, 0x73, 0xfa, 0x9f, 0xa7, 0x95, 0x3b,
0x89, 0x09, 0x2a, 0x08, 0xb1, 0x1f, 0xab, 0x53, 0xbb, 0x1d, 0xdc, 0xf5, 0x48, 0x1b, 0x53, 0x66,
0xd5, 0xc5, 0x2f, 0x67, 0x51, 0x1a, 0x5b, 0xbf, 0x70, 0x0e, 0x48, 0xcf, 0x32, 0x07, 0x7c, 0x07,
0x2b, 0xc3, 0xb7, 0x53, 0x60, 0x7c, 0x0e, 0xc5, 0xb3, 0xe9, 0xee, 0x42, 0xd6, 0x1b, 0x19, 0x48,
0x92, 0x0a, 0xe6, 0x8f, 0xb0, 0xf4, 0x4d, 0xe8, 0xbd, 0x81, 0x59, 0xad, 0x06, 0x85, 0x08, 0xd3,
0xa0, 0x17, 0xb5, 0x30, 0x15, 0x58, 0x8d, 0xbb, 0xd4, 0x99, 0x58, 0xed, 0x97, 0x22, 0x64, 0x05,
0x7d, 0xa3, 0x03, 0xd0, 0xe5, 0xa0, 0x83, 0x6c, 0x6b, 0xd2, 0xf0, 0x6d, 0x8d, 0x0c, 0x96, 0xc6,
0x27, 0xb3, 0x2b, 0x28, 0xcc, 0xbe, 0x87, 0xac, 0x18, 0x48, 0xd0, 0x9d, 0xc9, 0xaa, 0xc9, 0xf1,
0xc8, 0xf8, 0x68, 0x26, 0x59, 0xe5, 0xa1, 0x0d, 0xba, 0xec, 0xf2, 0xd3, 0xae, 0x33, 0x32, 0xf5,
0x18, 0x1f, 0xcf, 0xa2, 0x10, 0x3b, 0x7a, 0x0e, 0x0b, 0xe7, 0xc6, 0x09, 0x54, 0x9b, 0x45, 0xfd,
0x7c, 0x57, 0xb9, 0xa4, 0xcb, 0x1d, 0x48, 0x6f, 0x62, 0x86, 0xaa, 0x93, 0x95, 0xce, 0x66, 0x0e,
0xe3, 0xc3, 0x19, 0x24, 0x63, 0xdc, 0x32, 0xfc, 0xb9, 0x23, 0x6b, 0xb2, 0xca, 0xf0, 0x88, 0x60,
0xd8, 0x33, 0xcb, 0x2b, 0x47, 0x0d, 0xc8, 0xf0, 0x8e, 0x8f, 0xa6, 0xc4, 0x96, 0x98, 0x0a, 0x8c,
0x95, 0x91, 0x6a, 0xde, 0xe0, 0x7f, 0xf7, 0xa1, 0x2d, 0xc8, 0x70, 0x8a, 0x46, 0x53, 0xea, 0x70,
0xb4, 0x9b, 0x8f, 0xb5, 0xb8, 0x0d, 0x85, 0xb8, 0xd1, 0x4d, 0x83, 0x62, 0xb8, 0x23, 0x8e, 0x35,
0xfa, 0x04, 0x72, 0xaa, 0x45, 0xa1, 0x29, 0xf9, 0x3e, 0xdf, 0xc9, 0x26, 0x18, 0xcc, 0x8a, 0x96,
0x33, 0x2d, 0xc2, 0xe1, 0xbe, 0x34, 0xd6, 0xe0, 0x53, 0xd0, 0x65, 0xef, 0x99, 0xf6, 0x68, 0x46,
0x3a, 0xd4, 0x58, 0x93, 0x04, 0xf2, 0x83, 0xf6, 0x81, 0xee, 0x4e, 0xaf, 0x91, 0x44, 0xb7, 0x32,
0xac, 0x59, 0xc5, 0x55, 0x45, 0xbd, 0x00, 0x48, 0x10, 0xfc, 0xfd, 0x29, 0x10, 0x5f, 0xd4, 0xaa,
0x8c, 0x4f, 0x2f, 0xa7, 0xa4, 0x1c, 0x3f, 0x05, 0x5d, 0x32, 0xf8, 0x34, 0xd8, 0x46, 0x78, 0x7e,
0x1c, 0x6c, 0x6b, 0xdf, 0x9e, 0xbc, 0x2a, 0x5f, 0xfb, 0xe3, 0x55, 0xf9, 0xda, 0xcf, 0xfd, 0xb2,
0x76, 0xd2, 0x2f, 0x6b, 0xbf, 0xf7, 0xcb, 0xda, 0x3f, 0xfd, 0xb2, 0xb6, 0xf3, 0xe8, 0xf5, 0xfe,
0xbb, 0xf2, 0x50, 0x2c, 0x76, 0x75, 0xe1, 0xe7, 0xfe, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x17,
0xec, 0xf8, 0x44, 0xa4, 0x11, 0x00, 0x00,
}

View File

@ -16,22 +16,22 @@ service Tasks {
// Create a task.
rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
// Start a task.
rpc Start(StartTaskRequest) returns (google.protobuf.Empty);
// Start a process.
rpc Start(StartRequest) returns (StartResponse);
// Delete a task and on disk state.
rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
rpc Get(GetTaskRequest) returns (GetTaskResponse);
rpc Get(GetRequest) returns (GetResponse);
rpc List(ListTasksRequest) returns (ListTasksResponse);
// Kill a task or process.
rpc Kill(KillRequest) returns (google.protobuf.Empty);
rpc Exec(ExecProcessRequest) returns (ExecProcessResponse);
rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
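Under the new service shape, Exec only registers the exec'd process and returns Empty; the process is then started with Start using the same exec_id, which is where the pid comes back. A rough sketch of that two-step flow, assuming a tasks.TasksClient named client; the process spec and IO fields on ExecProcessRequest are elided here:

package example

import (
	"context"

	tasks "github.com/containerd/containerd/api/services/tasks/v1"
)

// execAndStart registers an exec process and then starts it; with this change
// the pid is reported by Start, not by Exec.
func execAndStart(ctx context.Context, client tasks.TasksClient, containerID, execID string) (uint32, error) {
	// Register the exec process (spec and stdio fields omitted in this sketch).
	if _, err := client.Exec(ctx, &tasks.ExecProcessRequest{
		ContainerID: containerID,
		ExecID:      execID,
	}); err != nil {
		return 0, err
	}
	// Start it and read the pid from StartResponse.
	resp, err := client.Start(ctx, &tasks.StartRequest{
		ContainerID: containerID,
		ExecID:      execID,
	})
	if err != nil {
		return 0, err
	}
	return resp.Pid, nil
}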
@ -74,8 +74,13 @@ message CreateTaskResponse {
uint32 pid = 2;
}
message StartTaskRequest {
message StartRequest {
string container_id = 1;
string exec_id = 2;
}
message StartResponse {
uint32 pid = 1;
}
message DeleteTaskRequest {
@ -94,12 +99,13 @@ message DeleteProcessRequest {
string exec_id = 2;
}
message GetTaskRequest {
message GetRequest {
string container_id = 1;
string exec_id = 2;
}
message GetTaskResponse {
containerd.v1.types.Task task = 1;
message GetResponse {
containerd.v1.types.Process process = 1;
}
message ListTasksRequest {
@ -107,7 +113,7 @@ message ListTasksRequest {
}
message ListTasksResponse {
repeated containerd.v1.types.Task tasks = 1;
repeated containerd.v1.types.Process tasks = 1;
}
message KillRequest {
@ -132,7 +138,6 @@ message ExecProcessRequest {
}
message ExecProcessResponse {
uint32 pid = 1;
}
message ResizePtyRequest {

View File

@ -9,7 +9,7 @@
github.com/containerd/containerd/api/types/task/task.proto
It has these top-level messages:
Task
Process
*/
package task
@ -17,7 +17,6 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import google_protobuf1 "github.com/gogo/protobuf/types"
import strings "strings"
import reflect "reflect"
@ -43,6 +42,7 @@ const (
StatusRunning Status = 2
StatusStopped Status = 3
StatusPaused Status = 4
StatusPausing Status = 5
)
var Status_name = map[int32]string{
@ -51,6 +51,7 @@ var Status_name = map[int32]string{
2: "RUNNING",
3: "STOPPED",
4: "PAUSED",
5: "PAUSING",
}
var Status_value = map[string]int32{
"UNKNOWN": 0,
@ -58,6 +59,7 @@ var Status_value = map[string]int32{
"RUNNING": 2,
"STOPPED": 3,
"PAUSED": 4,
"PAUSING": 5,
}
func (x Status) String() string {
@ -65,26 +67,27 @@ func (x Status) String() string {
}
func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
type Task struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
Spec *google_protobuf1.Any `protobuf:"bytes,4,opt,name=spec" json:"spec,omitempty"`
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
type Process struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
}
func (m *Task) Reset() { *m = Task{} }
func (*Task) ProtoMessage() {}
func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
func (m *Process) Reset() { *m = Process{} }
func (*Process) ProtoMessage() {}
func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
func init() {
proto.RegisterType((*Task)(nil), "containerd.v1.types.Task")
proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
}
func (m *Task) Marshal() (dAtA []byte, err error) {
func (m *Process) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
@ -94,37 +97,33 @@ func (m *Task) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *Task) MarshalTo(dAtA []byte) (int, error) {
func (m *Process) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
if len(m.ContainerID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
i += copy(dAtA[i:], m.ContainerID)
}
if len(m.ID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if m.Pid != 0 {
dAtA[i] = 0x10
dAtA[i] = 0x18
i++
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
}
if m.Status != 0 {
dAtA[i] = 0x18
dAtA[i] = 0x20
i++
i = encodeVarintTask(dAtA, i, uint64(m.Status))
}
if m.Spec != nil {
dAtA[i] = 0x22
i++
i = encodeVarintTask(dAtA, i, uint64(m.Spec.Size()))
n1, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if len(m.Stdin) > 0 {
dAtA[i] = 0x2a
i++
@ -153,6 +152,11 @@ func (m *Task) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
if m.ExitStatus != 0 {
dAtA[i] = 0x48
i++
i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
}
return i, nil
}
@ -183,9 +187,13 @@ func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Task) Size() (n int) {
func (m *Process) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
l = len(m.ID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
@ -196,10 +204,6 @@ func (m *Task) Size() (n int) {
if m.Status != 0 {
n += 1 + sovTask(uint64(m.Status))
}
if m.Spec != nil {
l = m.Spec.Size()
n += 1 + l + sovTask(uint64(l))
}
l = len(m.Stdin)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
@ -215,6 +219,9 @@ func (m *Task) Size() (n int) {
if m.Terminal {
n += 2
}
if m.ExitStatus != 0 {
n += 1 + sovTask(uint64(m.ExitStatus))
}
return n
}
@ -231,19 +238,20 @@ func sovTask(x uint64) (n int) {
func sozTask(x uint64) (n int) {
return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Task) String() string {
func (this *Process) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Task{`,
s := strings.Join([]string{`&Process{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
`}`,
}, "")
return s
@ -256,7 +264,7 @@ func valueToStringTask(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Task) Unmarshal(dAtA []byte) error {
func (m *Process) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -279,13 +287,42 @@ func (m *Task) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Task: wiretype end group for non-group")
return fmt.Errorf("proto: Process: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
@ -314,7 +351,7 @@ func (m *Task) Unmarshal(dAtA []byte) error {
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
}
@ -333,7 +370,7 @@ func (m *Task) Unmarshal(dAtA []byte) error {
break
}
}
case 3:
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
@ -352,39 +389,6 @@ func (m *Task) Unmarshal(dAtA []byte) error {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Spec == nil {
m.Spec = &google_protobuf1.Any{}
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
@ -492,6 +496,25 @@ func (m *Task) Unmarshal(dAtA []byte) error {
}
}
m.Terminal = bool(v != 0)
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
}
m.ExitStatus = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ExitStatus |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTask(dAtA[iNdEx:])
@ -623,32 +646,33 @@ func init() {
}
var fileDescriptorTask = []byte{
// 431 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x6e, 0xd3, 0x30,
0x1c, 0xc6, 0xeb, 0xb4, 0xcb, 0x8a, 0x61, 0x53, 0x30, 0xd5, 0x64, 0x02, 0x32, 0x11, 0xa7, 0x88,
0x83, 0x23, 0xb6, 0x1b, 0xb7, 0x6e, 0xad, 0xd0, 0x84, 0x94, 0x55, 0x6e, 0x2b, 0xce, 0x6e, 0x6d,
0x82, 0xd5, 0xcd, 0x8e, 0x12, 0x07, 0xd4, 0x1b, 0x47, 0xb4, 0x77, 0xd8, 0x09, 0x9e, 0x81, 0x03,
0x4f, 0xb0, 0x23, 0x47, 0x4e, 0x88, 0xe5, 0x0d, 0x78, 0x03, 0x14, 0xa7, 0x8c, 0x1e, 0x76, 0x89,
0xfe, 0xdf, 0xf7, 0xfb, 0xa4, 0xfc, 0x64, 0xf8, 0x2a, 0x53, 0xf6, 0x7d, 0xb5, 0xa0, 0x4b, 0x73,
0x91, 0x2c, 0x8d, 0xb6, 0x5c, 0x69, 0x59, 0x88, 0xed, 0x93, 0xe7, 0x2a, 0xb1, 0xeb, 0x5c, 0x96,
0x89, 0xe5, 0xe5, 0xca, 0x7d, 0x68, 0x5e, 0x18, 0x6b, 0xd0, 0xa3, 0xff, 0x2b, 0xfa, 0xe1, 0x25,
0x75, 0xa3, 0x70, 0x90, 0x99, 0xcc, 0x38, 0x9e, 0x34, 0x57, 0x3b, 0x0d, 0x1f, 0x67, 0xc6, 0x64,
0xe7, 0x32, 0x71, 0x69, 0x51, 0xbd, 0x4b, 0xb8, 0x5e, 0xb7, 0xe8, 0xf9, 0x1f, 0x00, 0x7b, 0x33,
0x5e, 0xae, 0xd0, 0x01, 0xf4, 0x94, 0xc0, 0x20, 0x02, 0xf1, 0xbd, 0x63, 0xbf, 0xfe, 0xf5, 0xcc,
0x3b, 0x1d, 0x31, 0x4f, 0x09, 0x14, 0xc0, 0x6e, 0xae, 0x04, 0xf6, 0x22, 0x10, 0xef, 0xb1, 0xe6,
0x44, 0x47, 0xd0, 0x2f, 0x2d, 0xb7, 0x55, 0x89, 0xbb, 0x11, 0x88, 0xf7, 0x0f, 0x9f, 0xd0, 0x3b,
0x4c, 0xe8, 0xd4, 0x4d, 0xd8, 0x66, 0x8a, 0x62, 0xd8, 0x2b, 0x73, 0xb9, 0xc4, 0xbd, 0x08, 0xc4,
0xf7, 0x0f, 0x07, 0xb4, 0x35, 0xa2, 0xff, 0x8c, 0xe8, 0x50, 0xaf, 0x99, 0x5b, 0xa0, 0x01, 0xdc,
0x29, 0xad, 0x50, 0x1a, 0xef, 0x34, 0x2e, 0xac, 0x0d, 0xe8, 0xa0, 0xf9, 0xa9, 0x30, 0x95, 0xc5,
0xbe, 0xab, 0x37, 0x69, 0xd3, 0xcb, 0xa2, 0xc0, 0xbb, 0xb7, 0xbd, 0x2c, 0x0a, 0x14, 0xc2, 0xbe,
0x95, 0xc5, 0x85, 0xd2, 0xfc, 0x1c, 0xf7, 0x23, 0x10, 0xf7, 0xd9, 0x6d, 0x7e, 0xf1, 0x0d, 0x40,
0xbf, 0xd5, 0x43, 0x04, 0xee, 0xce, 0xd3, 0x37, 0xe9, 0xd9, 0xdb, 0x34, 0xe8, 0x84, 0x0f, 0x2f,
0xaf, 0xa2, 0xbd, 0x16, 0xcc, 0xf5, 0x4a, 0x9b, 0x8f, 0xba, 0xe1, 0x27, 0x6c, 0x3c, 0x9c, 0x8d,
0x47, 0x01, 0xd8, 0xe6, 0x27, 0x85, 0xe4, 0x56, 0x8a, 0x86, 0xb3, 0x79, 0x9a, 0x9e, 0xa6, 0xaf,
0x03, 0x6f, 0x9b, 0xb3, 0x4a, 0x6b, 0xa5, 0xb3, 0x86, 0x4f, 0x67, 0x67, 0x93, 0xc9, 0x78, 0x14,
0x74, 0xb7, 0xf9, 0xd4, 0x9a, 0x3c, 0x97, 0x02, 0x3d, 0x85, 0xfe, 0x64, 0x38, 0x9f, 0x8e, 0x47,
0x41, 0x2f, 0x0c, 0x2e, 0xaf, 0xa2, 0x07, 0x2d, 0x9e, 0xf0, 0xaa, 0x94, 0x22, 0xdc, 0xff, 0xfc,
0x85, 0x74, 0xbe, 0x7f, 0x25, 0x1b, 0xdb, 0x63, 0x7c, 0x7d, 0x43, 0x3a, 0x3f, 0x6f, 0x48, 0xe7,
0x53, 0x4d, 0xc0, 0x75, 0x4d, 0xc0, 0x8f, 0x9a, 0x80, 0xdf, 0x35, 0x01, 0x0b, 0xdf, 0x3d, 0xe4,
0xd1, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdf, 0xcf, 0x9e, 0x89, 0x51, 0x02, 0x00, 0x00,
// 447 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0xc6, 0xeb, 0x74, 0x4d, 0x3b, 0x77, 0x1b, 0xc1, 0x4c, 0x93, 0x55, 0x90, 0x1b, 0x71, 0xaa,
0x38, 0xa4, 0x62, 0xbb, 0x71, 0xdb, 0x9a, 0x0a, 0x55, 0x48, 0x59, 0xe4, 0xae, 0xe2, 0x38, 0x65,
0xb5, 0x15, 0xac, 0x31, 0x3b, 0x72, 0x1c, 0xfe, 0xdc, 0x38, 0xa2, 0x7d, 0x87, 0x89, 0x03, 0x7c,
0x0a, 0x3e, 0xc1, 0x8e, 0x9c, 0x10, 0xa7, 0x89, 0xe5, 0x93, 0x20, 0x27, 0xa1, 0xe4, 0xc0, 0x25,
0x7a, 0xde, 0xe7, 0xf7, 0xbc, 0x6f, 0x9e, 0x48, 0x81, 0x2f, 0x52, 0x61, 0xde, 0x14, 0x17, 0xc1,
0x5a, 0x5d, 0x4d, 0xd7, 0x4a, 0x9a, 0x44, 0x48, 0xae, 0x59, 0x5b, 0x26, 0x99, 0x98, 0x9a, 0x8f,
0x19, 0xcf, 0xa7, 0x26, 0xc9, 0x2f, 0xab, 0x47, 0x90, 0x69, 0x65, 0x14, 0x7a, 0xf4, 0x2f, 0x15,
0xbc, 0x7b, 0x1e, 0x54, 0xa1, 0xd1, 0x7e, 0xaa, 0x52, 0x55, 0xf1, 0xa9, 0x55, 0x75, 0xf4, 0xe9,
0x17, 0x07, 0xf6, 0x63, 0xad, 0xd6, 0x3c, 0xcf, 0xd1, 0x21, 0xdc, 0xd9, 0x2c, 0x9e, 0x0b, 0x86,
0x81, 0x0f, 0x26, 0xdb, 0x27, 0x0f, 0xca, 0xbb, 0xf1, 0x70, 0xf6, 0xd7, 0x5f, 0x84, 0x74, 0xb8,
0x09, 0x2d, 0x18, 0x3a, 0x80, 0x8e, 0x60, 0xd8, 0xa9, 0x92, 0x6e, 0x79, 0x37, 0x76, 0x16, 0x21,
0x75, 0x04, 0x43, 0x1e, 0xec, 0x66, 0x82, 0xe1, 0xae, 0x0f, 0x26, 0xbb, 0xd4, 0x4a, 0x74, 0x04,
0xdd, 0xdc, 0x24, 0xa6, 0xc8, 0xf1, 0x96, 0x0f, 0x26, 0x7b, 0x87, 0x8f, 0x83, 0xff, 0xb4, 0x0c,
0x96, 0x55, 0x84, 0x36, 0x51, 0xb4, 0x0f, 0x7b, 0xb9, 0x61, 0x42, 0xe2, 0x9e, 0x7d, 0x03, 0xad,
0x07, 0x74, 0x60, 0x4f, 0x31, 0x55, 0x18, 0xec, 0x56, 0x76, 0x33, 0x35, 0x3e, 0xd7, 0x1a, 0xf7,
0x37, 0x3e, 0xd7, 0x1a, 0x8d, 0xe0, 0xc0, 0x70, 0x7d, 0x25, 0x64, 0xf2, 0x16, 0x0f, 0x7c, 0x30,
0x19, 0xd0, 0xcd, 0x8c, 0xc6, 0x70, 0xc8, 0x3f, 0x08, 0x73, 0xde, 0x74, 0xdb, 0xae, 0x0a, 0x43,
0x6b, 0xd5, 0x55, 0x9e, 0xfd, 0x04, 0xd0, 0xad, 0x25, 0x22, 0xb0, 0xbf, 0x8a, 0x5e, 0x45, 0xa7,
0xaf, 0x23, 0xaf, 0x33, 0x7a, 0x78, 0x7d, 0xe3, 0xef, 0xd6, 0x60, 0x25, 0x2f, 0xa5, 0x7a, 0x2f,
0x2d, 0x9f, 0xd1, 0xf9, 0xf1, 0xd9, 0x3c, 0xf4, 0x40, 0x9b, 0xcf, 0x34, 0x4f, 0x0c, 0x67, 0x96,
0xd3, 0x55, 0x14, 0x2d, 0xa2, 0x97, 0x9e, 0xd3, 0xe6, 0xb4, 0x90, 0x52, 0xc8, 0xd4, 0xf2, 0xe5,
0xd9, 0x69, 0x1c, 0xcf, 0x43, 0xaf, 0xdb, 0xe6, 0x4b, 0xa3, 0xb2, 0x8c, 0x33, 0xf4, 0x04, 0xba,
0xf1, 0xf1, 0x6a, 0x39, 0x0f, 0xbd, 0xad, 0x91, 0x77, 0x7d, 0xe3, 0xef, 0xd4, 0x38, 0x4e, 0x8a,
0xbc, 0xbe, 0x6e, 0xa9, 0xbd, 0xde, 0x6b, 0x6f, 0x5b, 0x2c, 0x64, 0x3a, 0xda, 0xfb, 0xfc, 0x95,
0x74, 0xbe, 0x7f, 0x23, 0xcd, 0xd7, 0x9c, 0xe0, 0xdb, 0x7b, 0xd2, 0xf9, 0x75, 0x4f, 0x3a, 0x9f,
0x4a, 0x02, 0x6e, 0x4b, 0x02, 0x7e, 0x94, 0x04, 0xfc, 0x2e, 0x09, 0xb8, 0x70, 0xab, 0x7f, 0xe3,
0xe8, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x46, 0x63, 0xaf, 0x84, 0x02, 0x00, 0x00,
}

View File

@ -3,7 +3,6 @@ syntax = "proto3";
package containerd.v1.types;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
enum Status {
option (gogoproto.goproto_enum_prefix) = false;
@ -14,15 +13,17 @@ enum Status {
RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"];
}
message Task {
string id = 1;
uint32 pid = 2;
Status status = 3;
google.protobuf.Any spec = 4;
message Process {
string container_id = 1;
string id = 2;
uint32 pid = 3;
Status status = 4;
string stdin = 5;
string stdout = 6;
string stderr = 7;
bool terminal = 8;
uint32 exit_status = 9;
}
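Since Get and List now return this shared Process message for tasks and execs alike, callers read the container id, pid, status, and exit status from one type. A small consumer-side sketch, assuming a tasks.TasksClient named client from the regenerated tasks package (placeholder wiring):

package example

import (
	"context"
	"fmt"

	tasks "github.com/containerd/containerd/api/services/tasks/v1"
)

// dumpTasks lists all known processes and prints the new Process fields.
func dumpTasks(ctx context.Context, client tasks.TasksClient) error {
	resp, err := client.List(ctx, &tasks.ListTasksRequest{})
	if err != nil {
		return err
	}
	for _, p := range resp.Tasks { // each entry is now a Process, not a Task
		fmt.Printf("%s/%s pid=%d status=%s exit=%d\n",
			p.ContainerID, p.ID, p.Pid, p.Status, p.ExitStatus)
	}
	return nil
}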

View File

@ -4,8 +4,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"runtime"
"strconv"
@ -38,19 +36,14 @@ import (
"github.com/containerd/containerd/snapshot"
"github.com/containerd/containerd/typeurl"
pempty "github.com/golang/protobuf/ptypes/empty"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/health/grpc_health_v1"
)
func init() {
// reset the grpc logger so that it does not output in the STDIO of the calling process
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
// register TypeUrls for commonly marshaled external types
major := strconv.Itoa(specs.VersionMajor)
typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
@ -59,28 +52,6 @@ func init() {
typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
}
type clientOpts struct {
defaultns string
dialOptions []grpc.DialOption
}
type ClientOpt func(c *clientOpts) error
func WithDefaultNamespace(ns string) ClientOpt {
return func(c *clientOpts) error {
c.defaultns = ns
return nil
}
}
// WithDialOpts allows grpc.DialOptions to be set on the connection
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
return func(c *clientOpts) error {
c.dialOptions = opts
return nil
}
}
// New returns a new containerd client that is connected to the containerd
// instance provided by address
func New(address string, opts ...ClientOpt) (*Client, error) {
@ -93,9 +64,10 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithTimeout(100 * time.Second),
grpc.WithTimeout(60 * time.Second),
grpc.FailOnNonTempDialError(true),
grpc.WithDialer(dialer),
grpc.WithBackoffMaxDelay(3 * time.Second),
grpc.WithDialer(Dialer),
}
if len(copts.dialOptions) > 0 {
gopts = copts.dialOptions
@ -107,7 +79,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
grpc.WithStreamInterceptor(stream),
)
}
conn, err := grpc.Dial(dialAddress(address), gopts...)
conn, err := grpc.Dial(DialAddress(address), gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", address)
}
@ -132,8 +104,14 @@ type Client struct {
runtime string
}
// IsServing returns true if the client can successfully connect to the
// containerd daemon and the healthcheck service returns the SERVING
// response.
// This call will block if a transient error is encountered during
// connection. A timeout can be set in the context to ensure it returns
// early.
func (c *Client) IsServing(ctx context.Context) (bool, error) {
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{})
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false))
if err != nil {
return false, err
}
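Because the health check is now issued with grpc.FailFast(false), IsServing will block through transient connection errors, so a probe-style caller should bound it with a context deadline. A minimal usage sketch, assuming an already constructed *containerd.Client named client; the 5-second timeout is an arbitrary example value:

package example

import (
	"context"
	"log"
	"time"

	"github.com/containerd/containerd"
)

// checkHealth probes the daemon without risking an indefinite block.
func checkHealth(client *containerd.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	serving, err := client.IsServing(ctx)
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	if !serving {
		log.Print("containerd reported not serving")
	}
}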
@ -153,87 +131,6 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container
return out, nil
}
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
// WithContainerLabels adds the provided labels to the container
func WithContainerLabels(labels map[string]string) NewContainerOpts {
return func(_ context.Context, _ *Client, c *containers.Container) error {
c.Labels = labels
return nil
}
}
// WithSnapshot uses an existing root filesystem for the container
func WithSnapshot(id string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
// check that the snapshot exists, if not, fail on creation
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
return err
}
c.RootFS = id
return nil
}
}
// WithNewSnapshot allocates a new snapshot to be used by the container as the
// root filesystem in read-write mode
func WithNewSnapshot(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
c.Image = i.Name()
return nil
}
}
// WithNewSnapshotView allocates a new snapshot to be used by the container as the
// root filesystem in read-only mode
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
c.Image = i.Name()
return nil
}
}
// WithRuntime allows a user to specify the runtime name and additional options that should
// be used to create tasks for the container
func WithRuntime(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Runtime = containers.RuntimeInfo{
Name: name,
}
return nil
}
}
func WithSnapshotter(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Snapshotter = name
return nil
}
}
func WithImage(i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Image = i.Name()
return nil
}
}
// NewContainer will create a new container in containerd with the provided id;
// the id must be unique within the namespace
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
@ -255,6 +152,7 @@ func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContain
return containerFromRecord(c, r), nil
}
// LoadContainer loads an existing container from metadata
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
r, err := c.ContainerService().Get(ctx, id)
if err != nil {
@ -263,8 +161,6 @@ func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error
return containerFromRecord(c, r), nil
}
type RemoteOpts func(*Client, *RemoteContext) error
// RemoteContext is used to configure object resolutions and transfers with
// remote content stores and image providers.
type RemoteContext struct {
@ -299,46 +195,7 @@ func defaultRemoteContext() *RemoteContext {
}
}
// WithPullUnpack is used to unpack an image after pull. This
// uses the snapshotter, content store, and diff service
// configured for the client.
func WithPullUnpack(client *Client, c *RemoteContext) error {
c.Unpack = true
return nil
}
// WithPullSnapshotter specifies snapshotter name used for unpacking
func WithPullSnapshotter(snapshotterName string) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.Snapshotter = snapshotterName
return nil
}
}
// WithSchema1Conversion is used to convert Docker registry schema 1
// manifests to oci manifests on pull. Without this option schema 1
// manifests will return a not supported error.
func WithSchema1Conversion(client *Client, c *RemoteContext) error {
c.ConvertSchema1 = true
return nil
}
// WithResolver specifies the resolver to use.
func WithResolver(resolver remotes.Resolver) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.Resolver = resolver
return nil
}
}
// WithImageHandler adds a base handler to be called on dispatch.
func WithImageHandler(h images.Handler) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.BaseHandlers = append(c.BaseHandlers, h)
return nil
}
}
// Pull downloads the provided content into containerd's content store
func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpts) (Image, error) {
pullCtx := defaultRemoteContext()
for _, o := range opts {
@ -414,6 +271,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpts) (Imag
return img, nil
}
// Push uploads the provided content to a remote resource
func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpts) error {
pushCtx := defaultRemoteContext()
for _, o := range opts {
@ -539,11 +397,15 @@ func (c *Client) VersionService() versionservice.VersionClient {
return versionservice.NewVersionClient(c.conn)
}
// Version of containerd
type Version struct {
Version string
// Version number
Version string
// Revision from git that was built
Revision string
}
// Version returns the version of containerd that the client is connected to
func (c *Client) Version(ctx context.Context) (Version, error) {
response, err := c.VersionService().Version(ctx, &pempty.Empty{})
if err != nil {
@ -566,8 +428,10 @@ type importOpts struct {
refObject string
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
// WithOCIImportFormat sets the import format for an OCI image format
func WithOCIImportFormat() ImportOpt {
return func(c *importOpts) error {
if c.format != "" {
@ -630,8 +494,10 @@ type exportOpts struct {
format imageFormat
}
// ExportOpt allows callers to set export options
type ExportOpt func(c *exportOpts) error
// WithOCIExportFormat sets the OCI image format as the export target
func WithOCIExportFormat() ExportOpt {
return func(c *exportOpts) error {
if c.format != "" {

77
vendor/github.com/containerd/containerd/client_opts.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
package containerd
import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"google.golang.org/grpc"
)
type clientOpts struct {
defaultns string
dialOptions []grpc.DialOption
}
// ClientOpt allows callers to set options on the containerd client
type ClientOpt func(c *clientOpts) error
// WithDefaultNamespace sets the default namespace on the client
//
// Any operation that does not have a namespace set on the context will
// be provided the default namespace
func WithDefaultNamespace(ns string) ClientOpt {
return func(c *clientOpts) error {
c.defaultns = ns
return nil
}
}
// WithDialOpts allows grpc.DialOptions to be set on the connection
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
return func(c *clientOpts) error {
c.dialOptions = opts
return nil
}
}
// RemoteOpts allows the caller to set distribution options for a remote
type RemoteOpts func(*Client, *RemoteContext) error
// WithPullUnpack is used to unpack an image after pull. This
// uses the snapshotter, content store, and diff service
// configured for the client.
func WithPullUnpack(client *Client, c *RemoteContext) error {
c.Unpack = true
return nil
}
// WithPullSnapshotter specifies snapshotter name used for unpacking
func WithPullSnapshotter(snapshotterName string) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.Snapshotter = snapshotterName
return nil
}
}
// WithSchema1Conversion is used to convert Docker registry schema 1
// manifests to oci manifests on pull. Without this option schema 1
// manifests will return a not supported error.
func WithSchema1Conversion(client *Client, c *RemoteContext) error {
c.ConvertSchema1 = true
return nil
}
// WithResolver specifies the resolver to use.
func WithResolver(resolver remotes.Resolver) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.Resolver = resolver
return nil
}
}
// WithImageHandler adds a base handler to be called on dispatch.
func WithImageHandler(h images.Handler) RemoteOpts {
return func(client *Client, c *RemoteContext) error {
c.BaseHandlers = append(c.BaseHandlers, h)
return nil
}
}
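As a rough illustration of how these options compose, here is a hedged sketch; the New constructor, Close, the socket path, and the image reference are assumptions, while WithDefaultNamespace, WithPullUnpack, and Pull are the helpers shown above.
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Assumed constructor: containerd.New(address string, opts ...ClientOpt).
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// An explicit namespace on the context takes precedence over the client default.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull and unpack an image using the RemoteOpts defined above.
	img, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pulled %s", img.Name())
}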

View File

@ -1,19 +0,0 @@
// +build !windows
package containerd
import (
"fmt"
"net"
"strings"
"time"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
return net.DialTimeout("unix", address, timeout)
}
func dialAddress(address string) string {
return fmt.Sprintf("unix://%s", address)
}

View File

@ -1,16 +0,0 @@
package containerd
import (
"net"
"time"
winio "github.com/Microsoft/go-winio"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(address, &timeout)
}
func dialAddress(address string) string {
return address
}

View File

@ -11,23 +11,36 @@ import (
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/typeurl"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// DeleteOpts allows the caller to set options for the deletion of a container
type DeleteOpts func(context.Context, *Client, containers.Container) error
// Container is a metadata object for container resources and task creation
type Container interface {
// ID identifies the container
ID() string
// Info returns the underlying container record type
Info() containers.Container
// Delete removes the container
Delete(context.Context, ...DeleteOpts) error
// NewTask creates a new task based on the container metadata
NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error)
// Spec returns the OCI runtime specification
Spec() (*specs.Spec, error)
// Task returns the current task for the container
//
// If IOAttach options are passed the client will reattach to the IO for the running
// task. If no task exists for the container a NotFound error is returned
Task(context.Context, IOAttach) (Task, error)
// Image returns the image that the container is based on
Image(context.Context) (Image, error)
// Labels returns the labels set on the container
Labels(context.Context) (map[string]string, error)
// SetLabels sets the provided labels for the container and returns the final label set
SetLabels(context.Context, map[string]string) (map[string]string, error)
}
@ -108,14 +121,6 @@ func (c *container) Spec() (*specs.Spec, error) {
return &s, nil
}
// WithSnapshotCleanup deletes the rootfs allocated for the container
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
if c.RootFS != "" {
return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS)
}
return nil
}
// Delete deletes an existing container
// an error is returned if the container has running tasks
func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) (err error) {
@ -153,15 +158,6 @@ func (c *container) Image(ctx context.Context) (Image, error) {
}, nil
}
type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
func WithRootFS(mounts []mount.Mount) NewTaskOpts {
return func(ctx context.Context, c *Client, ti *TaskInfo) error {
ti.RootFS = mounts
return nil
}
}
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
c.mu.Lock()
defer c.mu.Unlock()
@ -169,12 +165,13 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
if err != nil {
return nil, err
}
cfg := i.Config()
request := &tasks.CreateTaskRequest{
ContainerID: c.c.ID,
Terminal: i.Terminal,
Stdin: i.Stdin,
Stdout: i.Stdout,
Stderr: i.Stderr,
Terminal: cfg.Terminal,
Stdin: cfg.Stdin,
Stdout: cfg.Stdout,
Stderr: cfg.Stderr,
}
if c.c.RootFS != "" {
// get the rootfs from the snapshotter and add it to the request
@ -232,7 +229,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
}
func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
response, err := c.client.TaskService().Get(ctx, &tasks.GetTaskRequest{
response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
ContainerID: c.c.ID,
})
if err != nil {
@ -242,19 +239,19 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
}
return nil, err
}
var i *IO
var i IO
if ioAttach != nil {
// get the existing fifo paths from the task information stored by the daemon
paths := &FIFOSet{
Dir: getFifoDir([]string{
response.Task.Stdin,
response.Task.Stdout,
response.Task.Stderr,
response.Process.Stdin,
response.Process.Stdout,
response.Process.Stderr,
}),
In: response.Task.Stdin,
Out: response.Task.Stdout,
Err: response.Task.Stderr,
Terminal: response.Task.Terminal,
In: response.Process.Stdin,
Out: response.Process.Stdout,
Err: response.Process.Stderr,
Terminal: response.Process.Terminal,
}
if i, err = ioAttach(paths); err != nil {
return nil, err
@ -263,8 +260,8 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
t := &task{
client: c.client,
io: i,
id: response.Task.ID,
pid: response.Task.Pid,
id: response.Process.ID,
pid: response.Process.Pid,
}
return t, nil
}

View File

@ -0,0 +1,100 @@
package containerd
import (
"context"
"github.com/containerd/containerd/containers"
"github.com/opencontainers/image-spec/identity"
)
// NewContainerOpts allows the caller to set additional options when creating a container
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
// WithRuntime allows a user to specify the runtime name and additional options that should
// be used to create tasks for the container
func WithRuntime(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Runtime = containers.RuntimeInfo{
Name: name,
}
return nil
}
}
// WithImage sets the provided image as the base for the container
func WithImage(i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Image = i.Name()
return nil
}
}
// WithContainerLabels adds the provided labels to the container
func WithContainerLabels(labels map[string]string) NewContainerOpts {
return func(_ context.Context, _ *Client, c *containers.Container) error {
c.Labels = labels
return nil
}
}
// WithSnapshotter sets the provided snapshotter for use by the container
func WithSnapshotter(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Snapshotter = name
return nil
}
}
// WithSnapshot uses an existing root filesystem for the container
func WithSnapshot(id string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
// check that the snapshot exists, if not, fail on creation
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
return err
}
c.RootFS = id
return nil
}
}
// WithNewSnapshot allocates a new snapshot to be used by the container as the
// root filesystem in read-write mode
func WithNewSnapshot(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
c.Image = i.Name()
return nil
}
}
// WithSnapshotCleanup deletes the rootfs allocated for the container
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
if c.RootFS != "" {
return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS)
}
return nil
}
// WithNewSnapshotView allocates a new snapshot to be used by the container as the
// root filesystem in read-only mode
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
c.Image = i.Name()
return nil
}
}
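A minimal, hypothetical sketch of creating and removing a container with these options; the ids, labels, and runtime name are placeholders, a real container would normally also carry an OCI runtime spec, and only the option constructors above come from this file.
// Assumes: "context", "github.com/containerd/containerd".
func createAndRemove(ctx context.Context, client *containerd.Client, img containerd.Image) error {
	c, err := client.NewContainer(ctx, "example-container",
		containerd.WithImage(img),
		containerd.WithNewSnapshot("example-snapshot", img),
		containerd.WithRuntime("linux"), // runtime name is illustrative only
		containerd.WithContainerLabels(map[string]string{"app": "demo"}),
	)
	if err != nil {
		return err
	}
	// WithSnapshotCleanup removes the snapshot allocated by WithNewSnapshot
	// when the container record is deleted.
	return c.Delete(ctx, containerd.WithSnapshotCleanup)
}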

View File

@ -6,7 +6,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/containers"
@ -20,6 +19,9 @@ import (
"github.com/opencontainers/image-spec/specs-go/v1"
)
// WithCheckpoint allows a container to be created from the checkpointed information
// provided by the descriptor. The image, snapshot, and runtime specifications are
// restored on the container
func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
// set image and rw, and spec
return func(ctx context.Context, client *Client, c *containers.Container) error {
@ -51,12 +53,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
}
c.Image = index.Annotations["image.name"]
case images.MediaTypeContainerd1CheckpointConfig:
r, err := store.Reader(ctx, m.Digest)
if err != nil {
return err
}
data, err := ioutil.ReadAll(r)
r.Close()
data, err := content.ReadBlob(ctx, store, m.Digest)
if err != nil {
return err
}
@ -82,6 +79,9 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
}
}
// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
// previous checkpoint. Additional software such as CRIU may be required to
// restore a task from a checkpoint
func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
return func(ctx context.Context, c *Client, info *TaskInfo) error {
id := desc.Digest
@ -105,11 +105,13 @@ func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v1.Index, error) {
var index v1.Index
r, err := store.Reader(ctx, id)
p, err := content.ReadBlob(ctx, store, id)
if err != nil {
return nil, err
}
err = json.NewDecoder(r).Decode(&index)
r.Close()
return &index, err
if err := json.Unmarshal(p, &index); err != nil {
return nil, err
}
return &index, nil
}
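A hedged sketch of restoring a container from a checkpoint with the option above; the descriptor and rootfs snapshot id are assumed to come from an earlier checkpoint, and v1 is the OCI image-spec package imported by this file.
// Assumes: "context", "github.com/containerd/containerd",
// v1 "github.com/opencontainers/image-spec/specs-go/v1".
func restore(ctx context.Context, client *containerd.Client, checkpoint v1.Descriptor) (containerd.Container, error) {
	// WithCheckpoint restores the image, rootfs snapshot, and runtime spec
	// recorded in the checkpoint index onto the new container.
	return client.NewContainer(ctx, "restored",
		containerd.WithCheckpoint(checkpoint, "restored-rootfs"),
	)
}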

View File

@ -5,13 +5,17 @@ import (
"io"
"time"
"github.com/containerd/containerd/oci"
"github.com/opencontainers/go-digest"
)
type ReaderAt interface {
io.ReaderAt
io.Closer
Size() int64
}
type Provider interface {
Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
ReaderAt(ctx context.Context, dgst digest.Digest) (io.ReaderAt, error)
ReaderAt(ctx context.Context, dgst digest.Digest) (ReaderAt, error)
}
type Ingester interface {
@ -78,8 +82,20 @@ type IngestManager interface {
}
type Writer interface {
oci.BlobWriter
// Close is expected to be called after Commit() when commission is needed.
io.WriteCloser
// Digest may return empty digest or panics until committed.
Digest() digest.Digest
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
Commit(size int64, expected digest.Digest) error
// Status returns the current state of write
Status() (Status, error)
// Truncate updates the size of the target blob
Truncate(size int64) error
}
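For orientation, a hedged sketch of writing a blob through this Writer interface; the Ingester.Writer signature and the ref naming are assumptions, and only the Writer methods above are taken from this change.
// Assumes: "context", "github.com/containerd/containerd/content",
// "github.com/opencontainers/go-digest".
func writeData(ctx context.Context, ing content.Ingester, ref string, data []byte, expected digest.Digest) error {
	// Writer(ctx, ref, size, expected) is assumed to be the Ingester method.
	w, err := ing.Writer(ctx, ref, int64(len(data)), expected)
	if err != nil {
		return err
	}
	defer w.Close()
	if _, err := w.Write(data); err != nil {
		return err
	}
	// Commit verifies the size and digest of what was written.
	return w.Commit(int64(len(data)), expected)
}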

View File

@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"sync"
"github.com/containerd/containerd/errdefs"
@ -20,17 +19,25 @@ var (
}
)
func NewReader(ra ReaderAt) io.Reader {
rd := io.NewSectionReader(ra, 0, ra.Size())
return rd
}
// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byte, error) {
rc, err := provider.Reader(ctx, dgst)
ra, err := provider.ReaderAt(ctx, dgst)
if err != nil {
return nil, err
}
defer rc.Close()
defer ra.Close()
return ioutil.ReadAll(rc)
p := make([]byte, ra.Size())
_, err = ra.ReadAt(p, 0)
return p, err
}
// WriteBlob writes data with the expected digest into the content store. If

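A short sketch of reading content with the ReaderAt-based helper; the client, digest value, and imports are assumed, while ReadBlob and NewReader are the helpers shown above.
// Assumes: "context", "github.com/containerd/containerd",
// "github.com/containerd/containerd/content", "github.com/opencontainers/go-digest".
func readBlob(ctx context.Context, client *containerd.Client, dgst digest.Digest) ([]byte, error) {
	cs := client.ContentStore()
	// ReadBlob loads the whole blob into memory; for large layers, obtain a
	// ReaderAt from cs.ReaderAt and wrap it with content.NewReader to stream.
	return content.ReadBlob(ctx, cs, dgst)
}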
52
vendor/github.com/containerd/containerd/dialer.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package containerd
import (
"net"
"strings"
"time"
"github.com/pkg/errors"
)
type dialResult struct {
c net.Conn
err error
}
func Dialer(address string, timeout time.Duration) (net.Conn, error) {
var (
stopC = make(chan struct{})
synC = make(chan *dialResult)
)
address = strings.TrimPrefix(address, "unix://")
go func() {
defer close(synC)
for {
select {
case <-stopC:
return
default:
c, err := dialer(address, timeout)
if isNoent(err) {
<-time.After(10 * time.Millisecond)
continue
}
synC <- &dialResult{c, err}
return
}
}
}()
select {
case dr := <-synC:
return dr.c, dr.err
case <-time.After(timeout):
close(stopC)
go func() {
dr := <-synC
if dr != nil {
dr.c.Close()
}
}()
return nil, errors.Errorf("dial %s: no such file or directory", address)
}
}

32
vendor/github.com/containerd/containerd/dialer_unix.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// +build !windows
package containerd
import (
"fmt"
"net"
"os"
"syscall"
"time"
)
func isNoent(err error) bool {
if err != nil {
if nerr, ok := err.(*net.OpError); ok {
if serr, ok := nerr.Err.(*os.SyscallError); ok {
if serr.Err == syscall.ENOENT {
return true
}
}
}
}
return false
}
func dialer(address string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", address, timeout)
}
func DialAddress(address string) string {
return fmt.Sprintf("unix://%s", address)
}
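These exported helpers can be wired into a raw gRPC connection, roughly as sketched below; the specific dial options (WithInsecure, WithTimeout, WithDialer) and the socket path are assumptions about the vendored grpc package.
// Assumes: "time", "google.golang.org/grpc", "github.com/containerd/containerd".
func dialContainerd() (*grpc.ClientConn, error) {
	const socket = "/run/containerd/containerd.sock"
	// Dialer retries while the socket does not exist yet, so dialing tolerates
	// containerd still starting up.
	return grpc.Dial(containerd.DialAddress(socket),
		grpc.WithInsecure(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(containerd.Dialer),
	)
}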

View File

@ -0,0 +1,29 @@
package containerd
import (
"net"
"os"
"syscall"
"time"
winio "github.com/Microsoft/go-winio"
)
func isNoent(err error) bool {
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == syscall.ENOENT {
return true
}
}
}
return false
}
func dialer(address string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(address, &timeout)
}
func DialAddress(address string) string {
return address
}

View File

@ -13,6 +13,7 @@ import (
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/typeurl"
goevents "github.com/docker/go-events"
"github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -28,20 +29,27 @@ func NewExchange() *Exchange {
}
// Forward accepts an envelope to be directly distributed on the exchange.
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) error {
log.G(ctx).WithFields(logrus.Fields{
"topic": envelope.Topic,
"ns": envelope.Namespace,
"type": envelope.Event.TypeUrl,
}).Debug("forward event")
if err := namespaces.Validate(envelope.Namespace); err != nil {
return errors.Wrapf(err, "event envelope has invalid namespace")
//
// This is useful when an event is forwarded on behalf of another namespace or
// when the event is propagated on behalf of another publisher.
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
if err := validateEnvelope(envelope); err != nil {
return err
}
if err := validateTopic(envelope.Topic); err != nil {
return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
}
defer func() {
logger := log.G(ctx).WithFields(logrus.Fields{
"topic": envelope.Topic,
"ns": envelope.Namespace,
"type": envelope.Event.TypeUrl,
})
if err != nil {
logger.WithError(err).Error("error forwarding event")
} else {
logger.Debug("event forwarded")
}
}()
return e.broadcaster.Write(envelope)
}
@ -49,8 +57,14 @@ func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) error
// Publish packages and sends an event. The caller will be considered the
// initial publisher of the event. This means the timestamp will be calculated
// at this point and this method may read from the calling context.
func (e *Exchange) Publish(ctx context.Context, topic string, event Event) error {
namespace, err := namespaces.NamespaceRequired(ctx)
func (e *Exchange) Publish(ctx context.Context, topic string, event Event) (err error) {
var (
namespace string
encoded *types.Any
envelope events.Envelope
)
namespace, err = namespaces.NamespaceRequired(ctx)
if err != nil {
return errors.Wrapf(err, "failed publishing event")
}
@ -58,47 +72,76 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event Event) error
return errors.Wrapf(err, "envelope topic %q", topic)
}
evany, err := typeurl.MarshalAny(event)
encoded, err = typeurl.MarshalAny(event)
if err != nil {
return err
}
env := events.Envelope{
Timestamp: time.Now().UTC(),
Topic: topic,
Event: evany,
}
if err := e.broadcaster.Write(&env); err != nil {
return err
}
log.G(ctx).WithFields(logrus.Fields{
"topic": topic,
"type": evany.TypeUrl,
"ns": namespace,
}).Debug("published event")
return nil
envelope.Timestamp = time.Now().UTC()
envelope.Namespace = namespace
envelope.Topic = topic
envelope.Event = encoded
defer func() {
logger := log.G(ctx).WithFields(logrus.Fields{
"topic": envelope.Topic,
"ns": envelope.Namespace,
"type": envelope.Event.TypeUrl,
})
if err != nil {
logger.WithError(err).Error("error publishing event")
} else {
logger.Debug("event published")
}
}()
return e.broadcaster.Write(&envelope)
}
// Subscribe to events on the exchange. Events are sent through the returned
// channel ch. If an error is encountered, it will be sent on channel errs and
// errs will be closed. To end the subscription, cancel the provided context.
func (e *Exchange) Subscribe(ctx context.Context, filters ...filters.Filter) (ch <-chan *events.Envelope, errs <-chan error) {
//
// Zero or more filters may be provided as strings. Only events that match
// *any* of the provided filters will be sent on the channel. The filters use
// the standard containerd filters package syntax.
func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
var (
evch = make(chan *events.Envelope)
errq = make(chan error, 1)
channel = goevents.NewChannel(0)
queue = goevents.NewQueue(channel)
evch = make(chan *events.Envelope)
errq = make(chan error, 1)
channel = goevents.NewChannel(0)
queue = goevents.NewQueue(channel)
dst goevents.Sink = queue
)
// TODO(stevvooe): Insert the filter!
e.broadcaster.Add(queue)
go func() {
closeAll := func() {
defer close(errq)
defer e.broadcaster.Remove(queue)
defer e.broadcaster.Remove(dst)
defer queue.Close()
defer channel.Close()
}
ch = evch
errs = errq
if len(fs) > 0 {
filter, err := filters.ParseAll(fs...)
if err != nil {
errq <- errors.Wrapf(err, "failed parsing subscription filters")
closeAll()
return
}
dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool {
return filter.Match(adapt(gev))
}))
}
e.broadcaster.Add(dst)
go func() {
defer closeAll()
var err error
loop:
@ -133,9 +176,6 @@ func (e *Exchange) Subscribe(ctx context.Context, filters ...filters.Filter) (ch
errq <- err
}()
ch = evch
errs = errq
return
}
@ -161,3 +201,29 @@ func validateTopic(topic string) error {
return nil
}
func validateEnvelope(envelope *events.Envelope) error {
if err := namespaces.Validate(envelope.Namespace); err != nil {
return errors.Wrapf(err, "event envelope has invalid namespace")
}
if err := validateTopic(envelope.Topic); err != nil {
return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
}
if envelope.Timestamp.IsZero() {
return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
}
return nil
}
func adapt(ev interface{}) filters.Adaptor {
if adaptor, ok := ev.(filters.Adaptor); ok {
return adaptor
}
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
return "", false
})
}
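An in-package sketch of the new string-filter Subscribe, relying on this file's imports; the filter expression and topic pattern are illustrative only.
// In-package sketch; Exchange, log, and the events Envelope come from this file.
func watchTaskEvents(ctx context.Context, ex *Exchange) error {
	// Only envelopes whose topic matches the filter are delivered.
	ch, errs := ex.Subscribe(ctx, `topic~="/tasks/.+"`)
	for {
		select {
		case env := <-ch:
			log.G(ctx).WithField("topic", env.Topic).Debug("received event")
		case err := <-errs:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}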

View File

@ -3,63 +3,186 @@ package containerd
import (
"archive/tar"
"context"
"encoding/json"
"io"
"sort"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/oci"
ocispecs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
tw := tar.NewWriter(writer)
img := oci.Tar(tw)
defer tw.Close()
// For tar, we defer creating index until end of the function.
if err := oci.Init(img, oci.InitOpts{SkipCreateIndex: true}); err != nil {
return err
records := []tarRecord{
ociLayoutFile(""),
ociIndexRecord(desc),
}
cs := c.ContentStore()
algorithms := map[string]struct{}{}
exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
records = append(records, blobRecord(cs, desc))
algorithms[desc.Digest.Algorithm().String()] = struct{}{}
return nil, nil
}
handlers := images.Handlers(
images.ChildrenHandler(cs),
exportHandler(cs, img),
images.HandlerFunc(exportHandler),
)
// For tar, we need to use Walk instead of Dispatch for ensuring sequential write
// Walk sequentially since the number of fetches is likely one and doing in
// parallel requires locking the export handler
if err := images.Walk(ctx, handlers, desc); err != nil {
return err
}
// For tar, we don't use oci.PutManifestDescriptorToIndex() which allows appending desc to existing index.json
// but requires img to support random read access so as to read index.json.
return oci.WriteIndex(img,
ocispec.Index{
Versioned: ocispecs.Versioned{
SchemaVersion: 2,
},
Manifests: []ocispec.Descriptor{desc},
},
)
if len(algorithms) > 0 {
records = append(records, directoryRecord("blobs/", 0755))
for alg := range algorithms {
records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
}
}
return writeTar(ctx, tw, records)
}
func exportHandler(cs content.Store, img oci.ImageDriver) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
r, err := cs.Reader(ctx, desc.Digest)
if err != nil {
return nil, err
}
w, err := oci.NewBlobWriter(img, desc.Digest.Algorithm())
if err != nil {
return nil, err
}
if _, err = io.Copy(w, r); err != nil {
return nil, err
}
if err = w.Commit(desc.Size, desc.Digest); err != nil {
return nil, err
}
if err = w.Close(); err != nil {
return nil, err
}
return nil, nil
type tarRecord struct {
Header *tar.Header
CopyTo func(context.Context, io.Writer) (int64, error)
}
func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
return tarRecord{
Header: &tar.Header{
Name: path,
Mode: 0444,
Size: desc.Size,
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
r, err := cs.ReaderAt(ctx, desc.Digest)
if err != nil {
return 0, err
}
defer r.Close()
// Verify digest
dgstr := desc.Digest.Algorithm().Digester()
n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
if err != nil {
return 0, err
}
if dgstr.Digest() != desc.Digest {
return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
}
return n, nil
},
}
}
func directoryRecord(name string, mode int64) tarRecord {
return tarRecord{
Header: &tar.Header{
Name: name,
Mode: mode,
Typeflag: tar.TypeDir,
},
}
}
func ociLayoutFile(version string) tarRecord {
if version == "" {
version = ocispec.ImageLayoutVersion
}
layout := ocispec.ImageLayout{
Version: version,
}
b, err := json.Marshal(layout)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: ocispec.ImageLayoutFile,
Mode: 0444,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
index := ocispec.Index{
Versioned: ocispecs.Versioned{
SchemaVersion: 2,
},
Manifests: manifests,
}
b, err := json.Marshal(index)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: "index.json",
Mode: 0644,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
sort.Sort(tarRecordsByName(records))
for _, record := range records {
if err := tw.WriteHeader(record.Header); err != nil {
return err
}
if record.CopyTo != nil {
n, err := record.CopyTo(ctx, tw)
if err != nil {
return err
}
if n != record.Header.Size {
return errors.Errorf("unexpected copy size for %s", record.Header.Name)
}
} else if record.Header.Size > 0 {
return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
}
}
return nil
}
type tarRecordsByName []tarRecord
func (t tarRecordsByName) Len() int {
return len(t)
}
func (t tarRecordsByName) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t tarRecordsByName) Less(i, j int) bool {
return t[i].Header.Name < t[j].Header.Name
}

View File

@ -1,120 +0,0 @@
package fs
import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/pkg/errors"
)
var (
bufferPool = &sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
)
// CopyDir copies the directory from src to dst.
// Most efficient copy of files is attempted.
func CopyDir(dst, src string) error {
inodes := map[uint64]string{}
return copyDirectory(dst, src, inodes)
}
func copyDirectory(dst, src string, inodes map[uint64]string) error {
stat, err := os.Stat(src)
if err != nil {
return errors.Wrapf(err, "failed to stat %s", src)
}
if !stat.IsDir() {
return errors.Errorf("source is not directory")
}
if st, err := os.Stat(dst); err != nil {
if err := os.Mkdir(dst, stat.Mode()); err != nil {
return errors.Wrapf(err, "failed to mkdir %s", dst)
}
} else if !st.IsDir() {
return errors.Errorf("cannot copy to non-directory: %s", dst)
} else {
if err := os.Chmod(dst, stat.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod on %s", dst)
}
}
fis, err := ioutil.ReadDir(src)
if err != nil {
return errors.Wrapf(err, "failed to read %s", src)
}
if err := copyFileInfo(stat, dst); err != nil {
return errors.Wrapf(err, "failed to copy file info for %s", dst)
}
for _, fi := range fis {
source := filepath.Join(src, fi.Name())
target := filepath.Join(dst, fi.Name())
switch {
case fi.IsDir():
if err := copyDirectory(target, source, inodes); err != nil {
return err
}
continue
case (fi.Mode() & os.ModeType) == 0:
link, err := getLinkSource(target, fi, inodes)
if err != nil {
return errors.Wrap(err, "failed to get hardlink")
}
if link != "" {
if err := os.Link(link, target); err != nil {
return errors.Wrap(err, "failed to create hard link")
}
} else if err := copyFile(source, target); err != nil {
return errors.Wrap(err, "failed to copy files")
}
case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
link, err := os.Readlink(source)
if err != nil {
return errors.Wrapf(err, "failed to read link: %s", source)
}
if err := os.Symlink(link, target); err != nil {
return errors.Wrapf(err, "failed to create symlink: %s", target)
}
case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
if err := copyDevice(target, fi); err != nil {
return errors.Wrapf(err, "failed to create device")
}
default:
// TODO: Support pipes and sockets
return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
}
if err := copyFileInfo(fi, target); err != nil {
return errors.Wrap(err, "failed to copy file info")
}
if err := copyXAttrs(target, source); err != nil {
return errors.Wrap(err, "failed to copy xattrs")
}
}
return nil
}
func copyFile(source, target string) error {
src, err := os.Open(source)
if err != nil {
return errors.Wrapf(err, "failed to open source %s", source)
}
defer src.Close()
tgt, err := os.Create(target)
if err != nil {
return errors.Wrapf(err, "failed to open target %s", target)
}
defer tgt.Close()
return copyFileContent(tgt, src)
}

View File

@ -1,82 +0,0 @@
package fs
import (
"io"
"os"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func copyFileInfo(fi os.FileInfo, name string) error {
st := fi.Sys().(*syscall.Stat_t)
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
return errors.Wrapf(err, "failed to chown %s", name)
}
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
}
timespec := []unix.Timespec{unix.Timespec(st.Atim), unix.Timespec(st.Mtim)}
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
return nil
}
func copyFileContent(dst, src *os.File) error {
st, err := src.Stat()
if err != nil {
return errors.Wrap(err, "unable to stat source")
}
n, err := sysx.CopyFileRange(src.Fd(), nil, dst.Fd(), nil, int(st.Size()), 0)
if err != nil {
if err != syscall.ENOSYS && err != syscall.EXDEV {
return errors.Wrap(err, "copy file range failed")
}
buf := bufferPool.Get().([]byte)
_, err = io.CopyBuffer(dst, src, buf)
bufferPool.Put(buf)
return err
}
if int64(n) != st.Size() {
return errors.Wrapf(err, "short copy: %d of %d", int64(n), st.Size())
}
return nil
}
func copyXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View File

@ -1,65 +0,0 @@
// +build darwin freebsd
package fs
import (
"io"
"os"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
)
func copyFileInfo(fi os.FileInfo, name string) error {
st := fi.Sys().(*syscall.Stat_t)
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
return errors.Wrapf(err, "failed to chown %s", name)
}
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
}
if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atimespec, st.Mtimespec}); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
return nil
}
func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().([]byte)
_, err := io.CopyBuffer(dst, src, buf)
bufferPool.Put(buf)
return err
}
func copyXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View File

@ -1,33 +0,0 @@
package fs
import (
"io"
"os"
"github.com/pkg/errors"
)
func copyFileInfo(fi os.FileInfo, name string) error {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
// TODO: copy windows specific metadata
return nil
}
func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().([]byte)
_, err := io.CopyBuffer(dst, src, buf)
bufferPool.Put(buf)
return err
}
func copyXAttrs(dst, src string) error {
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
return errors.New("device copy not supported")
}

View File

@ -1,310 +0,0 @@
package fs
import (
"context"
"os"
"path/filepath"
"strings"
"golang.org/x/sync/errgroup"
"github.com/sirupsen/logrus"
)
// ChangeKind is the type of modification that
// a change is making.
type ChangeKind int
const (
// ChangeKindUnmodified represents an unmodified
// file
ChangeKindUnmodified = iota
// ChangeKindAdd represents an addition of
// a file
ChangeKindAdd
// ChangeKindModify represents a change to
// an existing file
ChangeKindModify
// ChangeKindDelete represents a delete of
// a file
ChangeKindDelete
)
func (k ChangeKind) String() string {
switch k {
case ChangeKindUnmodified:
return "unmodified"
case ChangeKindAdd:
return "add"
case ChangeKindModify:
return "modify"
case ChangeKindDelete:
return "delete"
default:
return ""
}
}
// Change represents single change between a diff and its parent.
type Change struct {
Kind ChangeKind
Path string
}
// ChangeFunc is the type of function called for each change
// computed during a directory changes calculation.
type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
// Changes computes changes between two directories calling the
// given change function for each computed change. The first
// directory is intended to be the base directory and the second
// directory the changed directory.
//
// The change callback is called in the order of path names and
// should be applicable in that order.
// Due to this apply ordering, the following is true
// - Removed directory trees only create a single change for the root
// directory removed. Remaining changes are implied.
// - A directory which is modified to become a file will not have
// delete entries for sub-path items, their removal is implied
// by the removal of the parent directory.
//
// Opaque directories will not be treated specially and each file
// removed from the base directory will show up as a removal.
//
// File content comparisons will be done on files which have timestamps
// which may have been truncated. If either of the files being compared
// has a zero value nanosecond value, each byte will be compared for
// differences. If 2 files have the same seconds value but different
// nanosecond values where one of those values is zero, the files will
// be considered unchanged if the content is the same. This behavior
// is to account for timestamp truncation during archiving.
func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
if a == "" {
logrus.Debugf("Using single walk diff for %s", b)
return addDirChanges(ctx, changeFn, b)
} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
return diffDirChanges(ctx, changeFn, a, diffOptions)
}
logrus.Debugf("Using double walk diff for %s from %s", b, a)
return doubleWalkDiff(ctx, changeFn, a, b)
}
func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(root, path)
if err != nil {
return err
}
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
return changeFn(ChangeKindAdd, path, f, nil)
})
}
// diffDirOptions is used when the diff can be directly calculated from
// a diff directory to its base, without walking both trees.
type diffDirOptions struct {
diffDir string
skipChange func(string) (bool, error)
deleteChange func(string, string, os.FileInfo) (string, error)
}
// diffDirChanges walks the diff directory and compares changes against the base.
func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
changedDirs := make(map[string]struct{})
return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(o.diffDir, path)
if err != nil {
return err
}
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
// TODO: handle opaqueness, start new double walker at this
// location to get deletes, and skip tree in single walker
if o.skipChange != nil {
if skip, err := o.skipChange(path); skip {
return err
}
}
var kind ChangeKind
deletedFile, err := o.deleteChange(o.diffDir, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
path = deletedFile
kind = ChangeKindDelete
f = nil
} else {
// Otherwise, the file was added
kind = ChangeKindAdd
// ...Unless it already existed in a base, in which case, it's a modification
stat, err := os.Stat(filepath.Join(base, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the base, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
kind = ChangeKindModify
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if kind == ChangeKindAdd || kind == ChangeKindDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
pi, err := os.Stat(filepath.Join(o.diffDir, parent))
if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
return err
}
changedDirs[parent] = struct{}{}
}
}
return changeFn(kind, path, f, nil)
})
}
// doubleWalkDiff walks both directories to create a diff
func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
g, ctx := errgroup.WithContext(ctx)
var (
c1 = make(chan *currentPath)
c2 = make(chan *currentPath)
f1, f2 *currentPath
rmdir string
)
g.Go(func() error {
defer close(c1)
return pathWalk(ctx, a, c1)
})
g.Go(func() error {
defer close(c2)
return pathWalk(ctx, b, c2)
})
g.Go(func() error {
for c1 != nil || c2 != nil {
if f1 == nil && c1 != nil {
f1, err = nextPath(ctx, c1)
if err != nil {
return err
}
if f1 == nil {
c1 = nil
}
}
if f2 == nil && c2 != nil {
f2, err = nextPath(ctx, c2)
if err != nil {
return err
}
if f2 == nil {
c2 = nil
}
}
if f1 == nil && f2 == nil {
continue
}
var f os.FileInfo
k, p := pathChange(f1, f2)
switch k {
case ChangeKindAdd:
if rmdir != "" {
rmdir = ""
}
f = f2.f
f2 = nil
case ChangeKindDelete:
// Check if this file is already removed by being
// under of a removed directory
if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
f1 = nil
continue
} else if rmdir == "" && f1.f.IsDir() {
rmdir = f1.path + string(os.PathSeparator)
} else if rmdir != "" {
rmdir = ""
}
f1 = nil
case ChangeKindModify:
same, err := sameFile(f1, f2)
if err != nil {
return err
}
if f1.f.IsDir() && !f2.f.IsDir() {
rmdir = f1.path + string(os.PathSeparator)
} else if rmdir != "" {
rmdir = ""
}
f = f2.f
f1 = nil
f2 = nil
if same {
if !isLinked(f) {
continue
}
k = ChangeKindUnmodified
}
}
if err := changeFn(k, p, f, nil); err != nil {
return err
}
}
return nil
})
return g.Wait()
}

View File

@ -1,102 +0,0 @@
// +build !windows
package fs
import (
"bytes"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
)
// whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const whiteoutPrefix = ".wh."
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
// detectDirDiff returns diff dir options if a directory could
// be found in the mount info for upper which is the direct
// diff with the provided lower directory
func detectDirDiff(upper, lower string) *diffDirOptions {
// TODO: get mount options for upper
// TODO: detect AUFS
// TODO: detect overlay
return nil
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+whiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, whiteoutPrefix) {
originalFile := f[len(whiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
// compareSysStat returns whether the stats are equivalent,
// whether the files are considered the same file, and
// an error
func compareSysStat(s1, s2 interface{}) (bool, error) {
ls1, ok := s1.(*syscall.Stat_t)
if !ok {
return false, nil
}
ls2, ok := s2.(*syscall.Stat_t)
if !ok {
return false, nil
}
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
}
func compareCapabilities(p1, p2 string) (bool, error) {
c1, err := sysx.LGetxattr(p1, "security.capability")
if err != nil && err != sysx.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
}
c2, err := sysx.LGetxattr(p2, "security.capability")
if err != nil && err != sysx.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
}
return bytes.Equal(c1, c2), nil
}
func isLinked(f os.FileInfo) bool {
s, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return false
}
return !f.IsDir() && s.Nlink > 1
}

View File

@ -1,21 +0,0 @@
package fs
import "os"
func detectDirDiff(upper, lower string) *diffDirOptions {
return nil
}
func compareSysStat(s1, s2 interface{}) (bool, error) {
// TODO: Use windows specific sys type
return false, nil
}
func compareCapabilities(p1, p2 string) (bool, error) {
// TODO: Use windows equivalent
return true, nil
}
func isLinked(os.FileInfo) bool {
return false
}

View File

@ -1,87 +0,0 @@
// +build linux
package fs
import (
"fmt"
"io/ioutil"
"os"
"syscall"
"unsafe"
)
func locateDummyIfEmpty(path string) (string, error) {
children, err := ioutil.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
if err != nil {
return "", err
}
name := dummyFile.Name()
err = dummyFile.Close()
return name, err
}
// SupportsDType returns whether the filesystem mounted on path supports d_type
func SupportsDType(path string) (bool, error) {
// locate dummy so that we have at least one dirent
dummy, err := locateDummyIfEmpty(path)
if err != nil {
return false, err
}
if dummy != "" {
defer os.Remove(dummy)
}
visited := 0
supportsDType := true
fn := func(ent *syscall.Dirent) bool {
visited++
if ent.Type == syscall.DT_UNKNOWN {
supportsDType = false
// stop iteration
return true
}
// continue iteration
return false
}
if err = iterateReadDir(path, fn); err != nil {
return false, err
}
if visited == 0 {
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
}
return supportsDType, nil
}
func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
d, err := os.Open(path)
if err != nil {
return err
}
defer d.Close()
fd := int(d.Fd())
buf := make([]byte, 4096)
for {
nbytes, err := syscall.ReadDirent(fd, buf)
if err != nil {
return err
}
if nbytes == 0 {
break
}
for off := 0; off < nbytes; {
ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
if stop := fn(ent); stop {
return nil
}
off += int(ent.Reclen)
}
}
return nil
}

View File

@ -1,12 +0,0 @@
package fs
type Usage struct {
Inodes int64
Size int64
}
// DiskUsage counts the number of inodes and disk usage for the resources under
// path.
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}

View File

@ -1,47 +0,0 @@
// +build !windows
package fs
import (
"os"
"path/filepath"
"syscall"
)
func diskUsage(roots ...string) (Usage, error) {
type inode struct {
// TODO(stevvooe): Can probably reduce memory usage by not tracking
// device, but we can leave this right for now.
dev, ino uint64
}
var (
size int64
inodes = map[inode]struct{}{} // expensive!
)
for _, root := range roots {
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
stat := fi.Sys().(*syscall.Stat_t)
inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if _, ok := inodes[inoKey]; !ok {
inodes[inoKey] = struct{}{}
size += fi.Size()
}
return nil
}); err != nil {
return Usage{}, err
}
}
return Usage{
Inodes: int64(len(inodes)),
Size: size,
}, nil
}

View File

@ -1,33 +0,0 @@
// +build windows
package fs
import (
"os"
"path/filepath"
)
func diskUsage(roots ...string) (Usage, error) {
var (
size int64
)
// TODO(stevvooe): Support inodes (or equivalent) for windows.
for _, root := range roots {
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
size += fi.Size()
return nil
}); err != nil {
return Usage{}, err
}
}
return Usage{
Size: size,
}, nil
}

View File

@ -1,27 +0,0 @@
package fs
import "os"
// GetLinkInfo returns an identifier representing the node a hardlink is pointing
// to. If the file is not hard linked then 0 will be returned.
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
return getLinkInfo(fi)
}
// getLinkSource returns a path for the given name and
// file info to its link source in the provided inode
// map. If the given file name is not in the map and
// has other links, it is added to the inode map
// to be a source for other link locations.
func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
inode, isHardlink := getLinkInfo(fi)
if !isHardlink {
return "", nil
}
path, ok := inodes[inode]
if !ok {
inodes[inode] = name
}
return path, nil
}

View File

@ -1,17 +0,0 @@
// +build !windows
package fs
import (
"os"
"syscall"
)
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return 0, false
}
return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
}

View File

@ -1,7 +0,0 @@
package fs
import "os"
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
return 0, false
}

View File

@ -1,261 +0,0 @@
package fs
import (
"bytes"
"context"
"io"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
)
var (
errTooManyLinks = errors.New("too many links")
)
type currentPath struct {
path string
f os.FileInfo
fullPath string
}
func pathChange(lower, upper *currentPath) (ChangeKind, string) {
if lower == nil {
if upper == nil {
panic("cannot compare nil paths")
}
return ChangeKindAdd, upper.path
}
if upper == nil {
return ChangeKindDelete, lower.path
}
// TODO: compare by directory
switch i := strings.Compare(lower.path, upper.path); {
case i < 0:
// File in lower that is not in upper
return ChangeKindDelete, lower.path
case i > 0:
// File in upper that is not in lower
return ChangeKindAdd, upper.path
default:
return ChangeKindModify, upper.path
}
}
func sameFile(f1, f2 *currentPath) (bool, error) {
if os.SameFile(f1.f, f2.f) {
return true, nil
}
equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
if err != nil || !equalStat {
return equalStat, err
}
if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
return eq, err
}
// If not a directory also check size, modtime, and content
if !f1.f.IsDir() {
if f1.f.Size() != f2.f.Size() {
return false, nil
}
t1 := f1.f.ModTime()
t2 := f2.f.ModTime()
if t1.Unix() != t2.Unix() {
return false, nil
}
// If the timestamp may have been truncated in one of the
// files, check content of file to determine difference
if t1.Nanosecond() == 0 || t2.Nanosecond() == 0 {
if f1.f.Size() > 0 {
eq, err := compareFileContent(f1.fullPath, f2.fullPath)
if err != nil || !eq {
return eq, err
}
}
} else if t1.Nanosecond() != t2.Nanosecond() {
return false, nil
}
}
return true, nil
}
const compareChuckSize = 32 * 1024
// compareFileContent compares the content of 2 same sized files
// by comparing each byte.
func compareFileContent(p1, p2 string) (bool, error) {
f1, err := os.Open(p1)
if err != nil {
return false, err
}
defer f1.Close()
f2, err := os.Open(p2)
if err != nil {
return false, err
}
defer f2.Close()
b1 := make([]byte, compareChuckSize)
b2 := make([]byte, compareChuckSize)
for {
n1, err1 := f1.Read(b1)
if err1 != nil && err1 != io.EOF {
return false, err1
}
n2, err2 := f2.Read(b2)
if err2 != nil && err2 != io.EOF {
return false, err2
}
if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
return false, nil
}
if err1 == io.EOF && err2 == io.EOF {
return true, nil
}
}
}
func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(root, path)
if err != nil {
return err
}
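Since these helpers are unexported, here is an in-package sketch of how they compose; the writer and manifest descriptor are assumed, and a real export also appends one blobRecord per descriptor reached by the image walk, as above.
// In-package sketch; tar, io, context, and ocispec are already imported by this file.
func writeMinimalLayout(ctx context.Context, w io.Writer, manifest ocispec.Descriptor) error {
	tw := tar.NewWriter(w)
	defer tw.Close()
	// An oci-layout file plus an index.json referencing the manifest is the
	// smallest valid layout; writeTar sorts and emits the records.
	records := []tarRecord{
		ociLayoutFile(""),
		ociIndexRecord(manifest),
	}
	return writeTar(ctx, tw, records)
}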
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
p := &currentPath{
path: path,
f: f,
fullPath: filepath.Join(root, path),
}
select {
case <-ctx.Done():
return ctx.Err()
case pathC <- p:
return nil
}
})
}
func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
case p := <-pathC:
return p, nil
}
}
// RootPath joins a path with a root, evaluating and bounding any
// symlink to the root directory.
func RootPath(root, path string) (string, error) {
if path == "" {
return root, nil
}
var linksWalked int // to protect against cycles
for {
i := linksWalked
newpath, err := walkLinks(root, path, &linksWalked)
if err != nil {
return "", err
}
path = newpath
if i == linksWalked {
newpath = filepath.Join("/", newpath)
if path == newpath {
return filepath.Join(root, newpath), nil
}
path = newpath
}
}
}
func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
if *linksWalked > 255 {
return "", false, errTooManyLinks
}
path = filepath.Join("/", path)
if path == "/" {
return path, false, nil
}
realPath := filepath.Join(root, path)
fi, err := os.Lstat(realPath)
if err != nil {
// If path does not yet exist, treat as non-symlink
if os.IsNotExist(err) {
return path, false, nil
}
return "", false, err
}
if fi.Mode()&os.ModeSymlink == 0 {
return path, false, nil
}
newpath, err = os.Readlink(realPath)
if err != nil {
return "", false, err
}
if filepath.IsAbs(newpath) && strings.HasPrefix(newpath, root) {
newpath = newpath[:len(root)]
if !strings.HasPrefix(newpath, "/") {
newpath = "/" + newpath
}
}
*linksWalked++
return newpath, true, nil
}
func walkLinks(root, path string, linksWalked *int) (string, error) {
switch dir, file := filepath.Split(path); {
case dir == "":
newpath, _, err := walkLink(root, file, linksWalked)
return newpath, err
case file == "":
if os.IsPathSeparator(dir[len(dir)-1]) {
if dir == "/" {
return dir, nil
}
return walkLinks(root, dir[:len(dir)-1], linksWalked)
}
newpath, _, err := walkLink(root, dir, linksWalked)
return newpath, err
default:
newdir, err := walkLinks(root, dir, linksWalked)
if err != nil {
return "", err
}
newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
if err != nil {
return "", err
}
if !islink {
return newpath, nil
}
if filepath.IsAbs(newpath) {
return newpath, nil
}
return filepath.Join(newdir, newpath), nil
}
}

View File

@ -1,13 +0,0 @@
package fs
import "time"
// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* the same
// second count with either a or b having exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
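For illustration, a small standalone sketch of the precision issue described above; sameFsTime is reproduced verbatim and the timestamps are invented.
package main
import (
	"fmt"
	"time"
)
// sameFsTime reproduced here only for the example.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func main() {
	onDisk := time.Unix(1500000000, 123456789) // mtime with sub-second precision
	fromTar := time.Unix(1500000000, 0)        // tar only preserved whole seconds
	fmt.Println(sameFsTime(onDisk, fromTar))   // true: same second, one side has 0 ns
	fmt.Println(onDisk.Equal(fromTar))         // false: a strict comparison would report a change
}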

View File

@ -12,11 +12,18 @@ import (
"github.com/pkg/errors"
)
// Image describes an image used by containers
type Image interface {
// Name of the image
Name() string
// Target descriptor for the image content
Target() ocispec.Descriptor
// Unpack unpacks the image's content into a snapshot
Unpack(context.Context, string) error
// RootFS returns the digests of the image's rootfs layers
RootFS(ctx context.Context) ([]digest.Digest, error)
// Size returns the image size
Size(ctx context.Context) (int64, error)
}
var _ = (Image)(&image{})
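For illustration, a hedged standalone sketch of the extended Image interface in use; the socket path, namespace, image reference and snapshotter name are assumptions.
package main
import (
	"context"
	"log"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)
func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "example")
	img, err := client.Pull(ctx, "docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	// Unpack the image into the named snapshotter, then inspect it through
	// the new RootFS and Size methods.
	if err := img.Unpack(ctx, "overlayfs"); err != nil {
		log.Fatal(err)
	}
	diffIDs, err := img.RootFS(ctx)
	if err != nil {
		log.Fatal(err)
	}
	size, err := img.Size(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s: %d rootfs layers, %d bytes", img.Name(), len(diffIDs), size)
}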
@ -35,6 +42,16 @@ func (i *image) Target() ocispec.Descriptor {
return i.i.Target
}
func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
provider := i.client.ContentStore()
return i.i.RootFS(ctx, provider)
}
func (i *image) Size(ctx context.Context) (int64, error) {
provider := i.client.ContentStore()
return i.i.Size(ctx, provider)
}
func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
layers, err := i.getLayers(ctx)
if err != nil {

View File

@ -3,7 +3,6 @@ package images
import (
"context"
"encoding/json"
"io/ioutil"
"time"
"github.com/containerd/containerd/content"
@ -73,13 +72,7 @@ func Config(ctx context.Context, provider content.Provider, image ocispec.Descri
return configDesc, Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch image.MediaType {
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
rc, err := provider.Reader(ctx, image.Digest)
if err != nil {
return nil, err
}
defer rc.Close()
p, err := ioutil.ReadAll(rc)
p, err := content.ReadBlob(ctx, provider, image.Digest)
if err != nil {
return nil, err
}

View File

@ -8,62 +8,99 @@ import (
"sync"
)
type IO struct {
// IOConfig holds the io configurations.
type IOConfig struct {
// Terminal is true if one has been allocated
Terminal bool
Stdin string
Stdout string
Stderr string
// Stdin path
Stdin string
// Stdout path
Stdout string
// Stderr path
Stderr string
}
// IO holds the io information for a task or process
type IO interface {
// Config returns the IO configuration.
Config() IOConfig
// Cancel aborts all current io operations
Cancel()
// Wait blocks until all io copy operations have completed
Wait()
// Close cleans up all open io resources
Close() error
}
// cio is a basic container IO implementation.
type cio struct {
config IOConfig
closer *wgCloser
}
func (i *IO) Cancel() {
if i.closer == nil {
return
}
i.closer.Cancel()
func (c *cio) Config() IOConfig {
return c.config
}
func (i *IO) Wait() {
if i.closer == nil {
func (c *cio) Cancel() {
if c.closer == nil {
return
}
i.closer.Wait()
c.closer.Cancel()
}
func (i *IO) Close() error {
if i.closer == nil {
func (c *cio) Wait() {
if c.closer == nil {
return
}
c.closer.Wait()
}
func (c *cio) Close() error {
if c.closer == nil {
return nil
}
return i.closer.Close()
return c.closer.Close()
}
type IOCreation func(id string) (*IO, error)
// IOCreation creates new IO sets for a task
type IOCreation func(id string) (IO, error)
type IOAttach func(*FIFOSet) (*IO, error)
// IOAttach allows callers to reattach to running tasks
type IOAttach func(*FIFOSet) (IO, error)
// NewIO returns an IOCreation that will provide IO sets without a terminal
func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation {
return NewIOWithTerminal(stdin, stdout, stderr, false)
}
// NewIOWithTerminal creates a new io set with the provided io.Reader/Writers for use with a terminal
func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation {
return func(id string) (*IO, error) {
return func(id string) (_ IO, err error) {
paths, err := NewFifos(id)
if err != nil {
return nil, err
}
i := &IO{
defer func() {
if err != nil && paths.Dir != "" {
os.RemoveAll(paths.Dir)
}
}()
cfg := IOConfig{
Terminal: terminal,
Stdout: paths.Out,
Stderr: paths.Err,
Stdin: paths.In,
}
i := &cio{config: cfg}
set := &ioSet{
in: stdin,
out: stdout,
err: stderr,
}
closer, err := copyIO(paths, set, i.Terminal)
closer, err := copyIO(paths, set, cfg.Terminal)
if err != nil {
return nil, err
}
@ -72,23 +109,25 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool)
}
}
// WithAttach attaches the existing io for a task to the provided io.Reader/Writers
func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
return func(paths *FIFOSet) (*IO, error) {
return func(paths *FIFOSet) (IO, error) {
if paths == nil {
return nil, fmt.Errorf("cannot attach to existing fifos")
}
i := &IO{
cfg := IOConfig{
Terminal: paths.Terminal,
Stdout: paths.Out,
Stderr: paths.Err,
Stdin: paths.In,
}
i := &cio{config: cfg}
set := &ioSet{
in: stdin,
out: stdout,
err: stderr,
}
closer, err := copyIO(paths, set, i.Terminal)
closer, err := copyIO(paths, set, cfg.Terminal)
if err != nil {
return nil, err
}
@ -97,22 +136,30 @@ func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
}
}
// Stdio returns an IO implementation to be used for a task
// Stdio returns an IO set to be used for a task
// that outputs the container's IO as the current process's Stdio
func Stdio(id string) (*IO, error) {
func Stdio(id string) (IO, error) {
return NewIO(os.Stdin, os.Stdout, os.Stderr)(id)
}
// StdioTerminal will setup the IO for the task to use a terminal
func StdioTerminal(id string) (*IO, error) {
func StdioTerminal(id string) (IO, error) {
return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)(id)
}
// NullIO redirects the container's IO into /dev/null
func NullIO(id string) (IO, error) {
return &cio{}, nil
}
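For illustration, a hedged sketch of how these IO constructors are meant to be consumed; the Container value, the helper name, and the choice of creator are assumptions.
package example
import (
	"context"
	"github.com/containerd/containerd"
)
// startWithIO is a hypothetical helper: an IOCreation such as Stdio or NullIO
// is handed to NewTask, which invokes it with the task ID to set up the fifos.
func startWithIO(ctx context.Context, container containerd.Container, quiet bool) (containerd.Task, error) {
	ioCreate := containerd.Stdio // wire the task to this process's stdio
	if quiet {
		ioCreate = containerd.NullIO // or discard the task's output entirely
	}
	task, err := container.NewTask(ctx, ioCreate)
	if err != nil {
		return nil, err
	}
	if err := task.Start(ctx); err != nil {
		task.Delete(ctx)
		return nil, err
	}
	return task, nil
}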
// FIFOSet is a set of fifos for use with tasks
type FIFOSet struct {
// Dir is the directory holding the task fifos
Dir string
Dir string
// In, Out, and Err fifo paths
In, Out, Err string
Terminal bool
// Terminal is true if a terminal is being used for the task
Terminal bool
}
type ioSet struct {

View File

@ -1,4 +1,4 @@
package metadata
package boltutil
import (
"time"
@ -7,19 +7,36 @@ import (
"github.com/pkg/errors"
)
func readLabels(m map[string]string, bkt *bolt.Bucket) error {
return bkt.ForEach(func(k, v []byte) error {
m[string(k)] = string(v)
var (
bucketKeyLabels = []byte("labels")
bucketKeyCreatedAt = []byte("createdat")
bucketKeyUpdatedAt = []byte("updatedat")
)
// ReadLabels reads the labels key from the bucket
// Uses the key "labels"
func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
lbkt := bkt.Bucket(bucketKeyLabels)
if lbkt == nil {
return nil, nil
}
labels := map[string]string{}
if err := lbkt.ForEach(func(k, v []byte) error {
labels[string(k)] = string(v)
return nil
})
}); err != nil {
return nil, err
}
return labels, nil
}
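For illustration, a hedged standalone sketch of these helpers paired inside a bolt transaction; the database handle, bucket name, and helper name are assumptions.
package example
import (
	"time"
	"github.com/boltdb/bolt"
	"github.com/containerd/containerd/metadata/boltutil"
)
// roundTripLabels is a hypothetical helper: WriteTimestamps and WriteLabels
// persist under the "createdat"/"updatedat" and "labels" keys, and ReadLabels
// recovers the map from the same bucket.
func roundTripLabels(db *bolt.DB, labels map[string]string) (map[string]string, error) {
	var out map[string]string
	err := db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("example"))
		if err != nil {
			return err
		}
		now := time.Now().UTC()
		if err := boltutil.WriteTimestamps(bkt, now, now); err != nil {
			return err
		}
		if err := boltutil.WriteLabels(bkt, labels); err != nil {
			return err
		}
		out, err = boltutil.ReadLabels(bkt)
		return err
	})
	return out, err
}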
// writeLabels will write a new labels bucket to the provided bucket at key
// WriteLabels will write a new labels bucket to the provided bucket at key
// bucketKeyLabels, replacing the contents of the bucket with the provided map.
//
// The provided labels map will be modified to have the final contents of the
// bucket. Typically, this removes zero-value entries.
func writeLabels(bkt *bolt.Bucket, labels map[string]string) error {
// Uses the key "labels"
func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
// Remove existing labels to keep from merging
if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
@ -50,7 +67,9 @@ func writeLabels(bkt *bolt.Bucket, labels map[string]string) error {
return nil
}
func readTimestamps(created, updated *time.Time, bkt *bolt.Bucket) error {
// ReadTimestamps reads created and updated timestamps from a bucket.
// Uses keys "createdat" and "updatedat"
func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {
for _, f := range []struct {
b []byte
t *time.Time
@ -68,7 +87,9 @@ func readTimestamps(created, updated *time.Time, bkt *bolt.Bucket) error {
return nil
}
func writeTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
// WriteTimestamps writes created and updated timestamps to a bucket.
// Uses keys "createdat" and "updatedat"
func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
createdAt, err := created.MarshalBinary()
if err != nil {
return err

View File

@ -41,16 +41,14 @@ var (
bucketKeyDigest = []byte("digest")
bucketKeyMediaType = []byte("mediatype")
bucketKeySize = []byte("size")
bucketKeyLabels = []byte("labels")
bucketKeyImage = []byte("image")
bucketKeyRuntime = []byte("runtime")
bucketKeyName = []byte("name")
bucketKeyParent = []byte("parent")
bucketKeyOptions = []byte("options")
bucketKeySpec = []byte("spec")
bucketKeyRootFS = []byte("rootfs")
bucketKeyTarget = []byte("target")
bucketKeyCreatedAt = []byte("createdat")
bucketKeyUpdatedAt = []byte("updatedat")
)
func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {

View File

@ -10,6 +10,7 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/identifiers"
"github.com/containerd/containerd/metadata/boltutil"
"github.com/containerd/containerd/namespaces"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
@ -163,9 +164,6 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
updated.Labels = container.Labels
case "image":
updated.Image = container.Image
case "runtime":
// TODO(stevvooe): Should this actually be allowed?
updated.Runtime = container.Runtime
case "spec":
updated.Spec = container.Spec
case "rootfs":
@ -206,6 +204,16 @@ func (s *containerStore) Delete(ctx context.Context, id string) error {
}
func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
labels, err := boltutil.ReadLabels(bkt)
if err != nil {
return err
}
container.Labels = labels
if err := boltutil.ReadTimestamps(bkt, &container.CreatedAt, &container.UpdatedAt); err != nil {
return err
}
return bkt.ForEach(func(k, v []byte) error {
switch string(k) {
case string(bucketKeyImage):
@ -239,24 +247,7 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
container.Spec = &any
case string(bucketKeyRootFS):
container.RootFS = string(v)
case string(bucketKeyCreatedAt):
if err := container.CreatedAt.UnmarshalBinary(v); err != nil {
return err
}
case string(bucketKeyUpdatedAt):
if err := container.UpdatedAt.UnmarshalBinary(v); err != nil {
return err
}
case string(bucketKeyLabels):
lbkt := bkt.Bucket(bucketKeyLabels)
if lbkt == nil {
return nil
}
container.Labels = map[string]string{}
if err := readLabels(container.Labels, lbkt); err != nil {
return err
}
}
return nil
@ -264,18 +255,23 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
}
func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
if err := writeTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil {
if err := boltutil.WriteTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil {
return err
}
spec, err := container.Spec.Marshal()
if err != nil {
return err
if container.Spec != nil {
spec, err := container.Spec.Marshal()
if err != nil {
return err
}
if err := bkt.Put(bucketKeySpec, spec); err != nil {
return err
}
}
for _, v := range [][2][]byte{
{bucketKeyImage, []byte(container.Image)},
{bucketKeySpec, spec},
{bucketKeyRootFS, []byte(container.RootFS)},
} {
if err := bkt.Put(v[0], v[1]); err != nil {
@ -314,5 +310,5 @@ func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
}
}
return writeLabels(bkt, container.Labels)
return boltutil.WriteLabels(bkt, container.Labels)
}

View File

@ -3,7 +3,6 @@ package metadata
import (
"context"
"encoding/binary"
"io"
"strings"
"time"
@ -11,6 +10,7 @@ import (
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/metadata/boltutil"
"github.com/containerd/containerd/namespaces"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@ -390,24 +390,18 @@ func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Dige
return err
}
commitTime := time.Now().UTC()
sizeEncoded, err := encodeSize(size)
if err != nil {
return err
}
timeEncoded, err := time.Now().UTC().MarshalBinary()
if err != nil {
if err := boltutil.WriteTimestamps(bkt, commitTime, commitTime); err != nil {
return err
}
for _, v := range [][2][]byte{
{bucketKeyCreatedAt, timeEncoded},
{bucketKeyUpdatedAt, timeEncoded},
{bucketKeySize, sizeEncoded},
} {
if err := bkt.Put(v[0], v[1]); err != nil {
return err
}
if err := bkt.Put(bucketKeySize, sizeEncoded); err != nil {
return err
}
return nil
@ -421,14 +415,7 @@ func (nw *namespacedWriter) Status() (content.Status, error) {
return st, err
}
func (cs *contentStore) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
if err := cs.checkAccess(ctx, dgst); err != nil {
return nil, err
}
return cs.Store.Reader(ctx, dgst)
}
func (cs *contentStore) ReaderAt(ctx context.Context, dgst digest.Digest) (io.ReaderAt, error) {
func (cs *contentStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
if err := cs.checkAccess(ctx, dgst); err != nil {
return nil, err
}
@ -451,17 +438,15 @@ func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) err
}
func readInfo(info *content.Info, bkt *bolt.Bucket) error {
if err := readTimestamps(&info.CreatedAt, &info.UpdatedAt, bkt); err != nil {
if err := boltutil.ReadTimestamps(bkt, &info.CreatedAt, &info.UpdatedAt); err != nil {
return err
}
lbkt := bkt.Bucket(bucketKeyLabels)
if lbkt != nil {
info.Labels = map[string]string{}
if err := readLabels(info.Labels, lbkt); err != nil {
return err
}
labels, err := boltutil.ReadLabels(bkt)
if err != nil {
return err
}
info.Labels = labels
if v := bkt.Get(bucketKeySize); len(v) > 0 {
info.Size, _ = binary.Varint(v)
@ -471,11 +456,11 @@ func readInfo(info *content.Info, bkt *bolt.Bucket) error {
}
func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
if err := writeTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil {
if err := boltutil.WriteTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil {
return err
}
if err := writeLabels(bkt, info.Labels); err != nil {
if err := boltutil.WriteLabels(bkt, info.Labels); err != nil {
return errors.Wrapf(err, "writing labels for info %v", info.Digest)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/metadata/boltutil"
"github.com/containerd/containerd/namespaces"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@ -191,17 +192,15 @@ func (s *imageStore) Delete(ctx context.Context, name string) error {
}
func readImage(image *images.Image, bkt *bolt.Bucket) error {
if err := readTimestamps(&image.CreatedAt, &image.UpdatedAt, bkt); err != nil {
if err := boltutil.ReadTimestamps(bkt, &image.CreatedAt, &image.UpdatedAt); err != nil {
return err
}
lbkt := bkt.Bucket(bucketKeyLabels)
if lbkt != nil {
image.Labels = map[string]string{}
if err := readLabels(image.Labels, lbkt); err != nil {
return err
}
labels, err := boltutil.ReadLabels(bkt)
if err != nil {
return err
}
image.Labels = labels
tbkt := bkt.Bucket(bucketKeyTarget)
if tbkt == nil {
@ -228,11 +227,11 @@ func readImage(image *images.Image, bkt *bolt.Bucket) error {
}
func writeImage(bkt *bolt.Bucket, image *images.Image) error {
if err := writeTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil {
if err := boltutil.WriteTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil {
return err
}
if err := writeLabels(bkt, image.Labels); err != nil {
if err := boltutil.WriteLabels(bkt, image.Labels); err != nil {
return errors.Wrapf(err, "writing labels for image %v", image.Name)
}

View File

@ -4,9 +4,11 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/metadata/boltutil"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/snapshot"
@ -46,7 +48,11 @@ func getKey(tx *bolt.Tx, ns, name, key string) string {
if bkt == nil {
return ""
}
v := bkt.Get([]byte(key))
bkt = bkt.Bucket([]byte(key))
if bkt == nil {
return ""
}
v := bkt.Get(bucketKeyName)
if len(v) == 0 {
return ""
}
@ -74,20 +80,144 @@ func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error
}
func (s *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
bkey, err := s.resolveKey(ctx, key)
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return snapshot.Info{}, err
}
var (
bkey string
local = snapshot.Info{
Name: key,
}
)
if err := view(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
}
sbkt := bkt.Bucket([]byte(key))
if sbkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
}
local.Labels, err = boltutil.ReadLabels(sbkt)
if err != nil {
return errors.Wrap(err, "failed to read labels")
}
if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil {
return errors.Wrap(err, "failed to read timestamps")
}
bkey = string(sbkt.Get(bucketKeyName))
local.Parent = string(sbkt.Get(bucketKeyParent))
return nil
}); err != nil {
return snapshot.Info{}, err
}
info, err := s.Snapshotter.Stat(ctx, bkey)
if err != nil {
return snapshot.Info{}, err
}
info.Name = trimKey(info.Name)
if info.Parent != "" {
info.Parent = trimKey(info.Parent)
return overlayInfo(info, local), nil
}
func (s *snapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return snapshot.Info{}, err
}
return info, nil
if info.Name == "" {
return snapshot.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "")
}
var (
bkey string
local = snapshot.Info{
Name: info.Name,
}
)
if err := update(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name)
}
sbkt := bkt.Bucket([]byte(info.Name))
if sbkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name)
}
local.Labels, err = boltutil.ReadLabels(sbkt)
if err != nil {
return errors.Wrap(err, "failed to read labels")
}
if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil {
return errors.Wrap(err, "failed to read timestamps")
}
// Handle field updates
if len(fieldpaths) > 0 {
for _, path := range fieldpaths {
if strings.HasPrefix(path, "labels.") {
if local.Labels == nil {
local.Labels = map[string]string{}
}
key := strings.TrimPrefix(path, "labels.")
local.Labels[key] = info.Labels[key]
continue
}
switch path {
case "labels":
local.Labels = info.Labels
default:
return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name)
}
}
} else {
local.Labels = info.Labels
}
local.Updated = time.Now().UTC()
if err := boltutil.WriteTimestamps(sbkt, local.Created, local.Updated); err != nil {
return errors.Wrap(err, "failed to read timestamps")
}
if err := boltutil.WriteLabels(sbkt, local.Labels); err != nil {
return errors.Wrap(err, "failed to read labels")
}
bkey = string(sbkt.Get(bucketKeyName))
local.Parent = string(sbkt.Get(bucketKeyParent))
return nil
}); err != nil {
return snapshot.Info{}, err
}
info, err = s.Snapshotter.Stat(ctx, bkey)
if err != nil {
return snapshot.Info{}, err
}
return overlayInfo(info, local), nil
}
func overlayInfo(info, overlay snapshot.Info) snapshot.Info {
// Merge info
info.Name = overlay.Name
info.Created = overlay.Created
info.Updated = overlay.Updated
info.Parent = overlay.Parent
if info.Labels == nil {
info.Labels = overlay.Labels
} else {
for k, v := range overlay.Labels {
info.Labels[k] = v
}
}
return info
}
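For illustration, a hypothetical same-package snippet showing what the merge produces; the keys and labels are invented, and it assumes the surrounding file's snapshot and time imports.
// overlayInfoExample is a hypothetical same-package snippet: the bolt-stored
// metadata (name, parent, timestamps, labels) overrides what the backing
// snapshotter reported, and the stored labels win on key collisions.
func overlayInfoExample() snapshot.Info {
	backend := snapshot.Info{
		Name:   "default/1/layer-a", // internal namespaced key used by the backend
		Labels: map[string]string{"origin": "backend"},
	}
	stored := snapshot.Info{
		Name:    "layer-a", // user-visible key recorded in bolt
		Parent:  "layer-parent",
		Created: time.Now().UTC(),
		Updated: time.Now().UTC(),
		Labels:  map[string]string{"origin": "bolt", "extra": "yes"},
	}
	// Result: Name "layer-a", Parent "layer-parent", the bolt timestamps, and
	// Labels {"origin": "bolt", "extra": "yes"}.
	return overlayInfo(backend, stored)
}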
func (s *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
@ -106,20 +236,27 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er
return s.Snapshotter.Mounts(ctx, bkey)
}
func (s *snapshotter) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error) {
return s.createSnapshot(ctx, key, parent, false)
func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
return s.createSnapshot(ctx, key, parent, false, opts)
}
func (s *snapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) {
return s.createSnapshot(ctx, key, parent, true)
func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
return s.createSnapshot(ctx, key, parent, true, opts)
}
func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool) ([]mount.Mount, error) {
func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshot.Opt) ([]mount.Mount, error) {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
var base snapshot.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return nil, err
}
}
var m []mount.Mount
if err := update(ctx, s.db, func(tx *bolt.Tx) error {
bkt, err := createSnapshotterBucket(tx, ns, s.name)
@ -127,24 +264,40 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re
return err
}
bkey := string(bkt.Get([]byte(key)))
if bkey != "" {
return errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", key)
bbkt, err := bkt.CreateBucket([]byte(key))
if err != nil {
if err == bolt.ErrBucketExists {
err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", key)
}
return err
}
var bparent string
if parent != "" {
bparent = string(bkt.Get([]byte(parent)))
if bparent == "" {
pbkt := bkt.Bucket([]byte(parent))
if pbkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", parent)
}
bparent = string(pbkt.Get(bucketKeyName))
if err := bbkt.Put(bucketKeyParent, []byte(parent)); err != nil {
return err
}
}
sid, err := bkt.NextSequence()
if err != nil {
return err
}
bkey = createKey(sid, ns, key)
if err := bkt.Put([]byte(key), []byte(bkey)); err != nil {
bkey := createKey(sid, ns, key)
if err := bbkt.Put(bucketKeyName, []byte(bkey)); err != nil {
return err
}
ts := time.Now().UTC()
if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil {
return err
}
if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil {
return err
}
@ -162,37 +315,62 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re
return m, nil
}
func (s *snapshotter) Commit(ctx context.Context, name, key string) error {
func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return err
}
var base snapshot.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return err
}
}
return update(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
}
nameKey := string(bkt.Get([]byte(name)))
if nameKey != "" {
return errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", name)
bbkt, err := bkt.CreateBucket([]byte(name))
if err != nil {
if err == bolt.ErrBucketExists {
err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", name)
}
return err
}
bkey := string(bkt.Get([]byte(key)))
if bkey == "" {
obkt := bkt.Bucket([]byte(key))
if obkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
}
bkey := string(obkt.Get(bucketKeyName))
parent := string(obkt.Get(bucketKeyParent))
sid, err := bkt.NextSequence()
if err != nil {
return err
}
nameKey = createKey(sid, ns, name)
if err := bkt.Put([]byte(name), []byte(nameKey)); err != nil {
nameKey := createKey(sid, ns, name)
if err := bbkt.Put(bucketKeyName, []byte(nameKey)); err != nil {
return err
}
if err := bkt.Delete([]byte(key)); err != nil {
if err := bbkt.Put(bucketKeyParent, []byte(parent)); err != nil {
return err
}
ts := time.Now().UTC()
if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil {
return err
}
if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil {
return err
}
if err := bkt.DeleteBucket([]byte(key)); err != nil {
return err
}
@ -210,16 +388,19 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
}
return update(ctx, s.db, func(tx *bolt.Tx) error {
var bkey string
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
if bkt != nil {
sbkt := bkt.Bucket([]byte(key))
if sbkt != nil {
bkey = string(sbkt.Get(bucketKeyName))
}
}
bkey := string(bkt.Get([]byte(key)))
if bkey == "" {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
}
if err := bkt.Delete([]byte(key)); err != nil {
if err := bkt.DeleteBucket([]byte(key)); err != nil {
return err
}
@ -227,45 +408,93 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
})
}
type infoPair struct {
bkey string
info snapshot.Info
}
func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return err
}
var keys []string
var (
batchSize = 100
pairs = []infoPair{}
lastKey string
)
if err := view(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return nil
}
bkt.ForEach(func(k, v []byte) error {
if len(v) > 0 {
keys = append(keys, string(v))
for {
if err := view(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return nil
}
c := bkt.Cursor()
var k, v []byte
if lastKey == "" {
k, v = c.First()
} else {
k, v = c.Seek([]byte(lastKey))
}
for k != nil {
if v == nil {
if len(pairs) >= batchSize {
break
}
sbkt := bkt.Bucket(k)
pair := infoPair{
bkey: string(sbkt.Get(bucketKeyName)),
info: snapshot.Info{
Name: string(k),
Parent: string(sbkt.Get(bucketKeyParent)),
},
}
err := boltutil.ReadTimestamps(sbkt, &pair.info.Created, &pair.info.Updated)
if err != nil {
return err
}
pair.info.Labels, err = boltutil.ReadLabels(sbkt)
if err != nil {
return err
}
pairs = append(pairs, pair)
}
k, v = c.Next()
}
lastKey = string(k)
return nil
})
return nil
}); err != nil {
return err
}
for _, k := range keys {
info, err := s.Snapshotter.Stat(ctx, k)
if err != nil {
}); err != nil {
return err
}
info.Name = trimKey(info.Name)
if info.Parent != "" {
info.Parent = trimKey(info.Parent)
for _, pair := range pairs {
info, err := s.Snapshotter.Stat(ctx, pair.bkey)
if err != nil {
return err
}
if err := fn(ctx, overlayInfo(info, pair.info)); err != nil {
return err
}
}
if err := fn(ctx, info); err != nil {
return err
if lastKey == "" {
break
}
pairs = pairs[:0]
}
return nil

View File

@ -0,0 +1,83 @@
package mount
// On Solaris we can't invoke the mount system call directly. First,
// the mount system call takes more than 6 arguments, and go doesn't
// support invoking system calls that take more than 6 arguments. Past
// that, the mount system call is a private interface. For example,
// the arguments and data structures passed to the kernel to create an
// nfs mount are private and can change at any time. The only public
// and stable interface for creating mounts on Solaris is the mount.8
// command, so we'll invoke that here.
import (
"bytes"
"errors"
"fmt"
"os/exec"
"strings"
"golang.org/x/sys/unix"
)
const (
mountCmd = "/usr/sbin/mount"
)
func doMount(arg ...string) error {
cmd := exec.Command(mountCmd, arg...)
/* Setup Stdin, Stdout, and Stderr */
stderr := new(bytes.Buffer)
cmd.Stdin = nil
cmd.Stdout = nil
cmd.Stderr = stderr
/*
* Run the command. If the command fails create a new error
* object to return that includes stderr output.
*/
err := cmd.Start()
if err != nil {
return err
}
err = cmd.Wait()
if err != nil {
return errors.New(fmt.Sprintf("%v: %s", err, stderr.String()))
}
return nil
}
func (m *Mount) Mount(target string) error {
var err error
if len(m.Options) == 0 {
err = doMount("-F", m.Type, m.Source, target)
} else {
err = doMount("-F", m.Type, "-o", strings.Join(m.Options, ","),
m.Source, target)
}
return err
}
func Unmount(mount string, flags int) error {
return unix.Unmount(mount, flags)
}
// UnmountAll repeatedly unmounts the given mount point until there
// are no mounts remaining (EINVAL is returned by mount), which is
// useful for undoing a stack of mounts on the same mount point.
func UnmountAll(mount string, flags int) error {
for {
if err := Unmount(mount, flags); err != nil {
// EINVAL is returned if the target is not a
// mount point, indicating that we are
// done. It can also indicate a few other
// things (such as invalid flags) which we
// unfortunately end up squelching here too.
if err == unix.EINVAL {
return nil
}
return err
}
}
}
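For illustration, a hedged standalone sketch of declaring and applying a Mount; the filesystem type, source, options, target, and helper name are invented.
package example
import (
	"log"
	"github.com/containerd/containerd/mount"
)
// mountTmpfs is a hypothetical helper: the Mount value is declarative, and
// calling Mount(target) on Solaris ends up shelling out to /usr/sbin/mount.
func mountTmpfs(target string) error {
	m := mount.Mount{
		Type:    "tmpfs",
		Source:  "swap",
		Options: []string{"size=64m"},
	}
	if err := m.Mount(target); err != nil {
		return err
	}
	log.Printf("mounted tmpfs at %s", target)
	// Tear down with mount.UnmountAll(target, 0) when finished.
	return nil
}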

View File

@ -1,293 +0,0 @@
// Package oci provides basic operations for manipulating OCI images.
// This package can be used even outside of containerd, and contains some
// functions not used in containerd itself.
package oci
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
spec "github.com/opencontainers/image-spec/specs-go/v1"
)
// BlobWriter writes an OCI blob and returns a digest when committed.
type BlobWriter interface {
// Close is expected to be called after Commit() when commission is needed.
io.WriteCloser
// Digest may return an empty digest or panic until committed.
Digest() digest.Digest
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
Commit(size int64, expected digest.Digest) error
}
// ErrUnexpectedSize can be returned from BlobWriter.Commit()
type ErrUnexpectedSize struct {
Expected int64
Actual int64
}
func (e ErrUnexpectedSize) Error() string {
if e.Expected > 0 && e.Expected != e.Actual {
return fmt.Sprintf("unexpected size: %d != %d", e.Expected, e.Actual)
}
return fmt.Sprintf("malformed ErrUnexpectedSize(%+v)", e)
}
// ErrUnexpectedDigest can be returned from BlobWriter.Commit()
type ErrUnexpectedDigest struct {
Expected digest.Digest
Actual digest.Digest
}
func (e ErrUnexpectedDigest) Error() string {
if e.Expected.String() != "" && e.Expected.String() != e.Actual.String() {
return fmt.Sprintf("unexpected digest: %v != %v", e.Expected, e.Actual)
}
return fmt.Sprintf("malformed ErrUnexpectedDigest(%+v)", e)
}
// ImageDriver corresponds to the representation of an image.
// Path uses os.PathSeparator as the separator.
// The methods of ImageDriver should only be called from oci package.
type ImageDriver interface {
Init() error
Remove(path string) error
Reader(path string) (io.ReadCloser, error)
Writer(path string, perm os.FileMode) (io.WriteCloser, error)
BlobWriter(algo digest.Algorithm) (BlobWriter, error)
}
type InitOpts struct {
// ImageLayoutVersion can be an empty string to use the default version.
ImageLayoutVersion string
// skip creating oci-layout
SkipCreateImageLayout bool
// skip creating index.json
SkipCreateIndex bool
}
// Init initializes an OCI image structure.
// Init calls img.Init, creates `oci-layout`(0444), and creates `index.json`(0644).
//
func Init(img ImageDriver, opts InitOpts) error {
if err := img.Init(); err != nil {
return err
}
// Create oci-layout
if !opts.SkipCreateImageLayout {
imageLayoutVersion := opts.ImageLayoutVersion
if imageLayoutVersion == "" {
imageLayoutVersion = spec.ImageLayoutVersion
}
if err := WriteImageLayout(img, spec.ImageLayout{Version: imageLayoutVersion}); err != nil {
return err
}
}
// Create index.json
if !opts.SkipCreateIndex {
if err := WriteIndex(img, spec.Index{Versioned: specs.Versioned{SchemaVersion: 2}}); err != nil {
return err
}
}
return nil
}
func blobPath(d digest.Digest) string {
return filepath.Join("blobs", d.Algorithm().String(), d.Hex())
}
const (
indexPath = "index.json"
)
// GetBlobReader returns io.ReadCloser for a blob.
func GetBlobReader(img ImageDriver, d digest.Digest) (io.ReadCloser, error) {
// we return a reader rather than the full *os.File here so as to prohibit write operations.
return img.Reader(blobPath(d))
}
// ReadBlob reads an OCI blob.
func ReadBlob(img ImageDriver, d digest.Digest) ([]byte, error) {
r, err := GetBlobReader(img, d)
if err != nil {
return nil, err
}
defer r.Close()
return ioutil.ReadAll(r)
}
// WriteBlob writes bytes as an OCI blob and returns its digest using the canonical digest algorithm.
// If you need to specify a certain algorithm, you can use NewBlobWriter(img ImageDriver, algo digest.Algorithm).
func WriteBlob(img ImageDriver, b []byte) (digest.Digest, error) {
w, err := img.BlobWriter(digest.Canonical)
if err != nil {
return "", err
}
n, err := w.Write(b)
if err != nil {
return "", err
}
if n < len(b) {
return "", io.ErrShortWrite
}
if err := w.Close(); err != nil {
return "", err
}
return w.Digest(), err
}
// NewBlobWriter returns a BlobWriter.
func NewBlobWriter(img ImageDriver, algo digest.Algorithm) (BlobWriter, error) {
return img.BlobWriter(algo)
}
// DeleteBlob deletes an OCI blob.
func DeleteBlob(img ImageDriver, d digest.Digest) error {
return img.Remove(blobPath(d))
}
// ReadImageLayout returns the image layout.
func ReadImageLayout(img ImageDriver) (spec.ImageLayout, error) {
r, err := img.Reader(spec.ImageLayoutFile)
if err != nil {
return spec.ImageLayout{}, err
}
b, err := ioutil.ReadAll(r)
if err != nil {
return spec.ImageLayout{}, err
}
if err := r.Close(); err != nil {
return spec.ImageLayout{}, err
}
var layout spec.ImageLayout
if err := json.Unmarshal(b, &layout); err != nil {
return spec.ImageLayout{}, err
}
return layout, nil
}
// WriteImageLayout writes the image layout.
func WriteImageLayout(img ImageDriver, layout spec.ImageLayout) error {
b, err := json.Marshal(layout)
if err != nil {
return err
}
w, err := img.Writer(spec.ImageLayoutFile, 0444)
if err != nil {
return err
}
n, err := w.Write(b)
if err != nil {
return err
}
if n < len(b) {
return io.ErrShortWrite
}
return w.Close()
}
// ReadIndex returns the index.
func ReadIndex(img ImageDriver) (spec.Index, error) {
r, err := img.Reader(indexPath)
if err != nil {
return spec.Index{}, err
}
b, err := ioutil.ReadAll(r)
if err != nil {
return spec.Index{}, err
}
if err := r.Close(); err != nil {
return spec.Index{}, err
}
var idx spec.Index
if err := json.Unmarshal(b, &idx); err != nil {
return spec.Index{}, err
}
return idx, nil
}
// WriteIndex writes the index.
func WriteIndex(img ImageDriver, idx spec.Index) error {
b, err := json.Marshal(idx)
if err != nil {
return err
}
w, err := img.Writer(indexPath, 0644)
if err != nil {
return err
}
n, err := w.Write(b)
if err != nil {
return err
}
if n < len(b) {
return io.ErrShortWrite
}
return w.Close()
}
// RemoveManifestDescriptorFromIndex removes the manifest descriptor from the index.
// Returns a nil error when the entry is not found.
func RemoveManifestDescriptorFromIndex(img ImageDriver, refName string) error {
if refName == "" {
return errors.New("empty refName specified")
}
src, err := ReadIndex(img)
if err != nil {
return err
}
dst := src
dst.Manifests = nil
for _, m := range src.Manifests {
mRefName, ok := m.Annotations[spec.AnnotationRefName]
if ok && mRefName == refName {
continue
}
dst.Manifests = append(dst.Manifests, m)
}
return WriteIndex(img, dst)
}
// PutManifestDescriptorToIndex puts a manifest descriptor to the index.
// If ref name is set and conflicts with the existing descriptors, the old ones are removed.
func PutManifestDescriptorToIndex(img ImageDriver, desc spec.Descriptor) error {
refName, ok := desc.Annotations[spec.AnnotationRefName]
if ok && refName != "" {
if err := RemoveManifestDescriptorFromIndex(img, refName); err != nil {
return err
}
}
idx, err := ReadIndex(img)
if err != nil {
return err
}
idx.Manifests = append(idx.Manifests, desc)
return WriteIndex(img, idx)
}
// WriteJSONBlob is a utility function that writes x as a JSON blob with the specified media type, and returns the descriptor.
func WriteJSONBlob(img ImageDriver, x interface{}, mediaType string) (spec.Descriptor, error) {
b, err := json.Marshal(x)
if err != nil {
return spec.Descriptor{}, err
}
d, err := WriteBlob(img, b)
if err != nil {
return spec.Descriptor{}, err
}
return spec.Descriptor{
MediaType: mediaType,
Digest: d,
Size: int64(len(b)),
}, nil
}

View File

@ -1,156 +0,0 @@
package oci
import (
"archive/tar"
"bytes"
"errors"
"io"
"os"
"path/filepath"
"github.com/opencontainers/go-digest"
)
// TarWriter is an interface that is implemented by archive/tar.Writer.
// (Using an interface allows hooking)
type TarWriter interface {
io.WriteCloser
Flush() error
WriteHeader(hdr *tar.Header) error
}
// Tar is ImageDriver for TAR representation of an OCI image.
func Tar(w TarWriter) ImageDriver {
return &tarDriver{
w: w,
}
}
type tarDriver struct {
w TarWriter
}
func (d *tarDriver) Init() error {
headers := []tar.Header{
{
Name: "blobs/",
Mode: 0755,
Typeflag: tar.TypeDir,
},
{
Name: "blobs/" + string(digest.Canonical) + "/",
Mode: 0755,
Typeflag: tar.TypeDir,
},
}
for _, h := range headers {
if err := d.w.WriteHeader(&h); err != nil {
return err
}
}
return nil
}
func (d *tarDriver) Remove(path string) error {
return errors.New("Tar does not support Remove")
}
func (d *tarDriver) Reader(path string) (io.ReadCloser, error) {
// because tar does not support random access
return nil, errors.New("Tar does not support Reader")
}
func (d *tarDriver) Writer(path string, perm os.FileMode) (io.WriteCloser, error) {
name := filepath.ToSlash(path)
return &tarDriverWriter{
w: d.w,
name: name,
mode: int64(perm),
}, nil
}
// tarDriverWriter is used for writing non-blob files
// (e.g. oci-layout, index.json)
type tarDriverWriter struct {
bytes.Buffer
w TarWriter
name string
mode int64
}
func (w *tarDriverWriter) Close() error {
if err := w.w.WriteHeader(&tar.Header{
Name: w.name,
Mode: w.mode,
Size: int64(w.Len()),
Typeflag: tar.TypeReg,
}); err != nil {
return err
}
n, err := io.Copy(w.w, w)
if err != nil {
return err
}
if n < int64(w.Len()) {
return io.ErrShortWrite
}
return w.w.Flush()
}
func (d *tarDriver) BlobWriter(algo digest.Algorithm) (BlobWriter, error) {
return &tarBlobWriter{
w: d.w,
digester: algo.Digester(),
}, nil
}
// tarBlobWriter implements BlobWriter.
type tarBlobWriter struct {
w TarWriter
digester digest.Digester
buf bytes.Buffer // TODO: use tmp file for large buffer?
}
// Write implements io.Writer.
func (bw *tarBlobWriter) Write(b []byte) (int, error) {
n, err := bw.buf.Write(b)
if err != nil {
return n, err
}
return bw.digester.Hash().Write(b)
}
func (bw *tarBlobWriter) Commit(size int64, expected digest.Digest) error {
path := "blobs/" + bw.digester.Digest().Algorithm().String() + "/" + bw.digester.Digest().Hex()
if err := bw.w.WriteHeader(&tar.Header{
Name: path,
Mode: 0444,
Size: int64(bw.buf.Len()),
Typeflag: tar.TypeReg,
}); err != nil {
return err
}
n, err := io.Copy(bw.w, &bw.buf)
if err != nil {
return err
}
if n < int64(bw.buf.Len()) {
return io.ErrShortWrite
}
if size > 0 && size != n {
return ErrUnexpectedSize{Expected: size, Actual: n}
}
if expected != "" && bw.digester.Digest() != expected {
return ErrUnexpectedDigest{Expected: expected, Actual: bw.digester.Digest()}
}
return bw.w.Flush()
}
func (bw *tarBlobWriter) Close() error {
// we don't close bw.w (reused for writing another blob)
return bw.w.Flush()
}
func (bw *tarBlobWriter) Digest() digest.Digest {
return bw.digester.Digest()
}

View File

@ -9,16 +9,18 @@ import (
"github.com/containerd/containerd/log"
)
func NewContext(ctx context.Context, plugins map[PluginType]map[string]interface{}, root, id string) *InitContext {
func NewContext(ctx context.Context, plugins map[PluginType]map[string]interface{}, root, state, id string) *InitContext {
return &InitContext{
plugins: plugins,
Root: filepath.Join(root, id),
State: filepath.Join(state, id),
Context: log.WithModule(ctx, id),
}
}
type InitContext struct {
Root string
State string
Address string
Context context.Context
Config interface{}

View File

@ -2,19 +2,45 @@ package containerd
import (
"context"
"strings"
"syscall"
eventsapi "github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/typeurl"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// Process represents a system process
type Process interface {
// Pid is the system specific process id
Pid() uint32
// Start starts the process executing the user's defined binary
Start(context.Context) error
// Delete removes the process and any allocated resources, returning the exit status
Delete(context.Context, ...ProcessDeleteOpts) (uint32, error)
// Kill sends the provided signal to the process
Kill(context.Context, syscall.Signal) error
// Wait blocks until the process has exited, returning the exit status
Wait(context.Context) (uint32, error)
// CloseIO allows various pipes to be closed on the process
CloseIO(context.Context, ...IOCloserOpts) error
// Resize changes the width and height of the process's terminal
Resize(ctx context.Context, w, h uint32) error
// IO returns the io set for the process
IO() IO
// Status returns the executing status of the process
Status(context.Context) (Status, error)
}
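For illustration, a hedged sketch written only against the Process interface above; the helper name is an assumption, and the caller is assumed to have already created the process (for example via an exec on a task).
package example
import (
	"context"
	"github.com/containerd/containerd"
)
// runAndReap is a hypothetical helper: it starts a previously created
// process, blocks until it exits, and deletes it, returning the exit status.
func runAndReap(ctx context.Context, proc containerd.Process) (uint32, error) {
	if err := proc.Start(ctx); err != nil {
		return 0, err
	}
	status, err := proc.Wait(ctx)
	if err != nil {
		return 0, err
	}
	if _, err := proc.Delete(ctx); err != nil {
		return status, err
	}
	return status, nil
}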
type process struct {
id string
task *task
pid uint32
io *IO
io IO
spec *specs.Process
}
@ -30,27 +56,17 @@ func (p *process) Pid() uint32 {
// Start starts the exec process
func (p *process) Start(ctx context.Context) error {
any, err := typeurl.MarshalAny(p.spec)
if err != nil {
return err
}
request := &tasks.ExecProcessRequest{
r, err := p.task.client.TaskService().Start(ctx, &tasks.StartRequest{
ContainerID: p.task.id,
ExecID: p.id,
Terminal: p.io.Terminal,
Stdin: p.io.Stdin,
Stdout: p.io.Stdout,
Stderr: p.io.Stderr,
Spec: any,
}
response, err := p.task.client.TaskService().Exec(ctx, request)
})
if err != nil {
p.io.Cancel()
p.io.Wait()
p.io.Close()
return err
}
p.pid = response.Pid
p.pid = r.Pid
return nil
}
@ -64,10 +80,22 @@ func (p *process) Kill(ctx context.Context, s syscall.Signal) error {
}
func (p *process) Wait(ctx context.Context) (uint32, error) {
eventstream, err := p.task.client.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{})
cancellable, cancel := context.WithCancel(ctx)
defer cancel()
eventstream, err := p.task.client.EventService().Subscribe(cancellable, &eventsapi.SubscribeRequest{
Filters: []string{"topic==" + runtime.TaskExitEventTopic},
})
if err != nil {
return UnknownExitStatus, err
}
// first check if the task has exited
status, err := p.Status(ctx)
if err != nil {
return UnknownExitStatus, errdefs.FromGRPC(err)
}
if status.Status == Stopped {
return status.ExitStatus, nil
}
for {
evt, err := eventstream.Recv()
if err != nil {
@ -100,7 +128,7 @@ func (p *process) CloseIO(ctx context.Context, opts ...IOCloserOpts) error {
return err
}
func (p *process) IO() *IO {
func (p *process) IO() IO {
return p.io
}
@ -114,7 +142,19 @@ func (p *process) Resize(ctx context.Context, w, h uint32) error {
return err
}
func (p *process) Delete(ctx context.Context) (uint32, error) {
func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (uint32, error) {
for _, o := range opts {
if err := o(ctx, p); err != nil {
return UnknownExitStatus, err
}
}
status, err := p.Status(ctx)
if err != nil {
return UnknownExitStatus, err
}
if status.Status != Stopped {
return UnknownExitStatus, errors.Wrapf(errdefs.ErrFailedPrecondition, "process must be stopped before deletion")
}
if p.io != nil {
p.io.Wait()
p.io.Close()
@ -128,3 +168,17 @@ func (p *process) Delete(ctx context.Context) (uint32, error) {
}
return r.ExitStatus, nil
}
func (p *process) Status(ctx context.Context) (Status, error) {
r, err := p.task.client.TaskService().Get(ctx, &tasks.GetRequest{
ContainerID: p.task.id,
ExecID: p.id,
})
if err != nil {
return Status{}, errdefs.FromGRPC(err)
}
return Status{
Status: ProcessStatus(strings.ToLower(r.Process.Status.String())),
ExitStatus: r.Process.ExitStatus,
}, nil
}

View File

@ -0,0 +1 @@
package plugin

View File

@ -0,0 +1,73 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
// DO NOT EDIT!
/*
Package plugin is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
It has these top-level messages:
*/
package plugin
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
var E_FieldpathAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63300,
Name: "containerd.plugin.fieldpath_all",
Tag: "varint,63300,opt,name=fieldpath_all,json=fieldpathAll",
Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
}
var E_Fieldpath = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64400,
Name: "containerd.plugin.fieldpath",
Tag: "varint,64400,opt,name=fieldpath",
Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
}
func init() {
proto.RegisterExtension(E_FieldpathAll)
proto.RegisterExtension(E_Fieldpath)
}
func init() {
proto.RegisterFile("github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", fileDescriptorFieldpath)
}
var fileDescriptorFieldpath = []byte{
// 203 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x16, 0x14, 0xe5, 0x97, 0xe4, 0x27, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6,
0x67, 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0xe8, 0x81, 0x65, 0x84,
0x04, 0x11, 0x6a, 0xf5, 0x20, 0x4a, 0xa4, 0x14, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x11, 0x5a,
0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0xf2, 0x8b, 0x20, 0x9a, 0xac, 0x9c, 0xb9, 0x78,
0xe1, 0xe6, 0xc4, 0x27, 0xe6, 0xe4, 0x08, 0xc9, 0xe8, 0x41, 0xf4, 0xe8, 0xc1, 0xf4, 0xe8, 0xb9,
0x65, 0xe6, 0xa4, 0xfa, 0x17, 0x94, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x1c, 0x79, 0xc7, 0xac, 0xc0,
0xa8, 0xc1, 0x11, 0xc4, 0x03, 0xd7, 0xe4, 0x98, 0x93, 0x63, 0x65, 0xcf, 0xc5, 0x09, 0xe7, 0x0b,
0xc9, 0x63, 0x18, 0xe0, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x0e, 0x37, 0x63, 0xc2, 0x77, 0x88, 0x19,
0x08, 0x3d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48, 0x8e,
0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x04, 0x04, 0x00,
0x00, 0xff, 0xff, 0xd6, 0x21, 0x2a, 0xb6, 0x17, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,40 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package containerd.plugin;
import "google/protobuf/descriptor.proto";
extend google.protobuf.FileOptions {
optional bool fieldpath_all = 63300;
}
extend google.protobuf.MessageOptions {
optional bool fieldpath = 64400;
}

View File

@ -0,0 +1,10 @@
package plugin
import (
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)
func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false))
}

View File

@ -109,13 +109,13 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
// turns out, we have a valid digest, make a url.
urls = append(urls, fetcher.url("manifests", dgst.String()))
// fallback to blobs on not found.
urls = append(urls, fetcher.url("blobs", dgst.String()))
} else {
urls = append(urls, fetcher.url("manifests", refspec.Object))
}
// fallback to blobs on not found.
urls = append(urls, fetcher.url("blobs", dgst.String()))
for _, u := range urls {
req, err := http.NewRequest(http.MethodHead, u, nil)
if err != nil {

Some files were not shown because too many files have changed in this diff