Merge pull request #105 from Random-Liu/upgrade-containerd

Upgrade containerd to 2386062

commit 719b097e96

Godeps/Godeps.json (generated)
@@ -8,13 +8,8 @@
   "Deps": [
     {
       "ImportPath": "github.com/Microsoft/go-winio",
-      "Comment": "v0.4.1",
-      "Rev": "706941bedd2d9b3a8c88e4022bd0078101f233f2"
-    },
-    {
-      "ImportPath": "github.com/Sirupsen/logrus",
-      "Comment": "v0.11.0",
-      "Rev": "d26492970760ca5d33129d2d799e34be5c4782eb"
+      "Comment": "v0.4.4",
+      "Rev": "7ff89941bcb93df2e962467fb073c6e997b13cf0"
     },
     {
       "ImportPath": "github.com/blang/semver",
@@ -28,157 +23,201 @@
     },
     {
       "ImportPath": "github.com/containerd/containerd",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/containers",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/containers/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/content",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/content/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/diff",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/diff/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/execution",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/events/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/images",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/images/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/namespaces",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/namespaces/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/snapshot",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/snapshot/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/services/version",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/types/descriptor",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/services/version/v1",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
-      "ImportPath": "github.com/containerd/containerd/api/types/mount",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "ImportPath": "github.com/containerd/containerd/api/types",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/api/types/task",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/containers",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/content",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/errdefs",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/events",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/filters",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/fs",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/identifiers",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/images",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/linux/runcopts",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/log",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/metadata",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/mount",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/namespaces",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/oci",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/plugin",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/reference",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/remotes",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/remotes/docker",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/remotes/docker/schema1",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/rootfs",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/services/content",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/services/diff",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/services/images",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/services/snapshot",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
     },
     {
       "ImportPath": "github.com/containerd/containerd/snapshot",
-      "Comment": "v0.2.3-1098-g8ed1e24",
-      "Rev": "8ed1e24ae925b5c6d8195858ee89dddb0507d65f"
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/containerd/typeurl",
+      "Comment": "v1.0.0-alpha2-24-g2386062",
+      "Rev": "2386062ce152d6f158d22be5991fe11c7cf67535"
+    },
+    {
+      "ImportPath": "github.com/containerd/continuity/sysx",
+      "Rev": "86cec1535a968310e7532819f699ff2830ed7463"
     },
     {
       "ImportPath": "github.com/containerd/fifo",
-      "Rev": "69b99525e472735860a5269b75af1970142b3062"
+      "Rev": "fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6"
     },
     {
       "ImportPath": "github.com/containernetworking/cni/libcni",
@@ -236,9 +275,8 @@
       "Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
     },
     {
-      "ImportPath": "github.com/docker/docker/pkg/truncindex",
-      "Comment": "v1.13.1",
-      "Rev": "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
+      "ImportPath": "github.com/docker/go-events",
+      "Rev": "9461782956ad83b30282bf90e31fa6a70c255ba9"
     },
     {
       "ImportPath": "github.com/fsnotify/fsnotify",
@@ -280,15 +318,15 @@
     },
     {
       "ImportPath": "github.com/golang/protobuf/proto",
-      "Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+      "Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
     },
     {
       "ImportPath": "github.com/golang/protobuf/ptypes/any",
-      "Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+      "Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
     },
     {
       "ImportPath": "github.com/golang/protobuf/ptypes/empty",
-      "Rev": "7a211bcf3bce0e3f1d74f9894916e6f116ae83b4"
+      "Rev": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
     },
     {
       "ImportPath": "github.com/jpillora/backoff",
@@ -305,45 +343,45 @@
     },
     {
       "ImportPath": "github.com/opencontainers/image-spec/identity",
-      "Comment": "v1.0.0-rc6",
-      "Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+      "Comment": "v1.0.0-rc6-12-g372ad78",
+      "Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
     },
     {
       "ImportPath": "github.com/opencontainers/image-spec/specs-go",
-      "Comment": "v1.0.0-rc6",
-      "Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+      "Comment": "v1.0.0-rc6-12-g372ad78",
+      "Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
    },
     {
       "ImportPath": "github.com/opencontainers/image-spec/specs-go/v1",
-      "Comment": "v1.0.0-rc6",
-      "Rev": "1a6593ab6c3ab5902072b4694a22ff19425396ae"
+      "Comment": "v1.0.0-rc6-12-g372ad78",
+      "Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
     },
     {
       "ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
-      "Comment": "v1.0.0-rc3-74-g6394544",
-      "Rev": "639454475cb9c8b861cc599f8bcd5c8c790ae402"
+      "Comment": "v1.0.0-rc3-161-ge775f0f",
+      "Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
     },
     {
       "ImportPath": "github.com/opencontainers/runc/libcontainer/devices",
-      "Comment": "v1.0.0-rc3-74-g6394544",
-      "Rev": "639454475cb9c8b861cc599f8bcd5c8c790ae402"
+      "Comment": "v1.0.0-rc3-161-ge775f0f",
+      "Rev": "e775f0fba3ea329b8b766451c892c41a3d49594d"
     },
     {
       "ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
-      "Comment": "v1.0.0-rc5",
-      "Rev": "035da1dca3dfbb00d752eb58b0b158d6129f3776"
+      "Comment": "v1.0.0",
+      "Rev": "02137cd4e50b37a01665e1731fcd4ac2d2178230"
     },
     {
       "ImportPath": "github.com/opencontainers/runtime-tools/generate",
-      "Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+      "Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
     },
     {
       "ImportPath": "github.com/opencontainers/runtime-tools/generate/seccomp",
-      "Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+      "Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
     },
     {
       "ImportPath": "github.com/opencontainers/runtime-tools/validate",
-      "Rev": "68c195c3f2fa04a9a298b839eb2d94f31141271a"
+      "Rev": "e29f3ca4eb806a582ee1a1864c7b0563bd64c19b"
     },
     {
       "ImportPath": "github.com/pkg/errors",
@@ -355,6 +393,11 @@
       "Comment": "v1.0.0",
       "Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
     },
+    {
+      "ImportPath": "github.com/sirupsen/logrus",
+      "Comment": "v1.0.0",
+      "Rev": "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
+    },
     {
       "ImportPath": "github.com/spf13/pflag",
       "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
@@ -373,11 +416,6 @@
       "ImportPath": "github.com/syndtr/gocapability/capability",
       "Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba"
     },
-    {
-      "ImportPath": "github.com/tchap/go-patricia/patricia",
-      "Comment": "v2.2.6",
-      "Rev": "666120de432aea38ab06bd5c818f04f4129882c9"
-    },
     {
       "ImportPath": "golang.org/x/net/context",
       "Rev": "7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6"
@@ -416,11 +454,11 @@
     },
     {
       "ImportPath": "golang.org/x/sys/unix",
-      "Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
+      "Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
     },
     {
       "ImportPath": "golang.org/x/sys/windows",
-      "Rev": "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
+      "Rev": "739734461d1c916b6c72a63d7efda2b27edb369f"
     },
     {
       "ImportPath": "golang.org/x/text/secure/bidirule",
@@ -61,6 +61,7 @@ if [ ! -x "$(command -v containerd)" ]; then
 fi
 sudo pkill containerd
 sudo containerd -l debug &> ${REPORT_DIR}/containerd.log &
+sleep 1 # sleep 1 seconds for containerd to be ready.
 
 # Start cri-containerd
 cd ${ROOT}
@@ -1,4 +1,4 @@
-RUNC_VERSION=639454475cb9c8b861cc599f8bcd5c8c790ae402
+RUNC_VERSION=e775f0fba3ea329b8b766451c892c41a3d49594d
 CNI_VERSION=v0.4.0
-CONTAINERD_VERSION=8ed1e24ae925b5c6d8195858ee89dddb0507d65f
+CONTAINERD_VERSION=2386062ce152d6f158d22be5991fe11c7cf67535
 CRITEST_VERSION=74bbd4e142f752f13c648d9dde23defed3e472a2
@@ -22,7 +22,7 @@ import (
   "strings"
   "time"
 
-  "github.com/containerd/containerd/api/services/containers"
+  "github.com/containerd/containerd/containers"
   prototypes "github.com/gogo/protobuf/types"
   "github.com/golang/glog"
   imagespec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -135,24 +135,22 @@ func (c *criContainerdService) CreateContainer(ctx context.Context, r *runtime.C
   }()
 
   // Create containerd container.
-  if _, err = c.containerService.Create(ctx, &containers.CreateContainerRequest{
-    Container: containers.Container{
+  if _, err = c.containerService.Create(ctx, containers.Container{
     ID: id,
     // TODO(random-liu): Checkpoint metadata into container labels.
     Image:   image.ID,
-    Runtime: defaultRuntime,
+    Runtime: containers.RuntimeInfo{Name: defaultRuntime},
     Spec: &prototypes.Any{
       TypeUrl: runtimespec.Version,
       Value:   rawSpec,
     },
     RootFS: id,
-    },
   }); err != nil {
     return nil, fmt.Errorf("failed to create containerd container: %v", err)
   }
   defer func() {
     if retErr != nil {
-      if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
+      if err := c.containerService.Delete(ctx, id); err != nil {
         glog.Errorf("Failed to delete containerd container %q: %v", id, err)
       }
     }
|
|||||||
g.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod()))
|
g.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod()))
|
||||||
g.SetLinuxResourcesCPUQuota(resources.GetCpuQuota())
|
g.SetLinuxResourcesCPUQuota(resources.GetCpuQuota())
|
||||||
g.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares()))
|
g.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares()))
|
||||||
g.SetLinuxResourcesMemoryLimit(uint64(resources.GetMemoryLimitInBytes()))
|
g.SetLinuxResourcesMemoryLimit(resources.GetMemoryLimitInBytes())
|
||||||
g.SetLinuxResourcesOOMScoreAdj(int(resources.GetOomScoreAdj()))
|
g.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// setOCICapabilities adds/drops process capabilities.
|
// setOCICapabilities adds/drops process capabilities.
|
||||||
|
@@ -128,7 +128,7 @@ func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandbox
   assert.EqualValues(t, *spec.Linux.Resources.CPU.Quota, 200)
   assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, 300)
   assert.EqualValues(t, *spec.Linux.Resources.Memory.Limit, 400)
-  assert.EqualValues(t, *spec.Linux.Resources.OOMScoreAdj, 500)
+  assert.EqualValues(t, *spec.Process.OOMScoreAdj, 500)
 
   t.Logf("Check capabilities")
   assert.Contains(t, spec.Process.Capabilities.Bounding, "CAP_SYS_ADMIN")
@@ -23,9 +23,9 @@ import (
   "io"
   "io/ioutil"
 
-  "github.com/containerd/containerd/api/services/containers"
-  "github.com/containerd/containerd/api/services/execution"
-  "github.com/containerd/containerd/api/types/task"
+  "github.com/containerd/containerd/api/services/events/v1"
+  "github.com/containerd/containerd/api/services/tasks/v1"
+  "github.com/containerd/containerd/typeurl"
   prototypes "github.com/gogo/protobuf/types"
   "github.com/golang/glog"
   runtimespec "github.com/opencontainers/runtime-spec/specs-go"
@@ -58,15 +58,15 @@ func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSync
   }
 
   // Get exec process spec.
-  cntrResp, err := c.containerService.Get(ctx, &containers.GetContainerRequest{ID: id})
+  container, err := c.containerService.Get(ctx, id)
   if err != nil {
     return nil, fmt.Errorf("failed to get container %q from containerd: %v", id, err)
   }
   var spec runtimespec.Spec
-  if err := json.Unmarshal(cntrResp.Container.Spec.Value, &spec); err != nil {
+  if err := json.Unmarshal(container.Spec.Value, &spec); err != nil {
     return nil, fmt.Errorf("failed to unmarshal container spec: %v", err)
   }
-  pspec := &spec.Process
+  pspec := spec.Process
   pspec.Args = r.GetCmd()
   rawSpec, err := json.Marshal(pspec)
   if err != nil {
@@ -98,15 +98,16 @@ func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSync
   go io.Copy(stderrBuf, stderrPipe) // nolint: errcheck
 
   // Get containerd event client first, so that we won't miss any events.
-  // TODO(random-liu): Handle this in event handler. Create an events client for
-  // each exec introduces unnecessary overhead.
+  // TODO(random-liu): Add filter to only subscribe events of the exec process.
   cancellable, cancel := context.WithCancel(ctx)
-  events, err := c.taskService.Events(cancellable, &execution.EventsRequest{})
+  eventstream, err := c.eventService.Subscribe(cancellable, &events.SubscribeRequest{})
   if err != nil {
     return nil, fmt.Errorf("failed to get containerd event: %v", err)
   }
+  defer cancel()
 
-  resp, err := c.taskService.Exec(ctx, &execution.ExecRequest{
+  execID := generateID()
+  _, err = c.taskService.Exec(ctx, &tasks.ExecProcessRequest{
     ContainerID: id,
     Terminal:    false,
     Stdout:      stdout,
@@ -115,14 +116,22 @@ func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSync
       TypeUrl: runtimespec.Version,
       Value:   rawSpec,
     },
+    ExecID: execID,
   })
   if err != nil {
     return nil, fmt.Errorf("failed to exec in container %q: %v", id, err)
   }
-  exitCode, err := waitContainerExec(cancel, events, id, resp.Pid, r.GetTimeout())
+  exitCode, err := c.waitContainerExec(eventstream, id, execID)
   if err != nil {
     return nil, fmt.Errorf("failed to wait for exec in container %q to finish: %v", id, err)
   }
+  if _, err := c.taskService.DeleteProcess(ctx, &tasks.DeleteProcessRequest{
+    ContainerID: id,
+    ExecID:      execID,
+  }); err != nil && !isContainerdGRPCNotFoundError(err) {
+    return nil, fmt.Errorf("failed to delete exec %q in container %q: %v", execID, id, err)
+  }
+  // TODO(random-liu): [P1] Deal with timeout, kill and wait again on timeout.
 
   // TODO(random-liu): Make sure stdout/stderr are drained.
   return &runtime.ExecSyncResponse{
@@ -133,30 +142,24 @@ func (c *criContainerdService) ExecSync(ctx context.Context, r *runtime.ExecSync
 }
 
 // waitContainerExec waits for container exec to finish and returns the exit code.
-func waitContainerExec(cancel context.CancelFunc, events execution.Tasks_EventsClient, id string,
-  pid uint32, timeout int64) (uint32, error) {
-  // TODO(random-liu): [P1] Support ExecSync timeout.
-  // TODO(random-liu): Delete process after containerd upgrade.
-  defer func() {
-    // Stop events and drain the event channel. grpc-go#188
-    cancel()
-    for {
-      _, err := events.Recv()
-      if err != nil {
-        break
-      }
-    }
-  }()
+func (c *criContainerdService) waitContainerExec(eventstream events.Events_SubscribeClient, id string,
+  execID string) (uint32, error) {
   for {
-    e, err := events.Recv()
+    evt, err := eventstream.Recv()
     if err != nil {
       // Return non-zero exit code just in case.
       return unknownExitCode, err
     }
-    if e.Type != task.Event_EXIT {
+    // Continue until the event received is of type task exit.
+    if !typeurl.Is(evt.Event, &events.TaskExit{}) {
      continue
     }
-    if e.ID == id && e.Pid == pid {
+    any, err := typeurl.UnmarshalAny(evt.Event)
+    if err != nil {
+      return unknownExitCode, err
+    }
+    e := any.(*events.TaskExit)
+    if e.ContainerID == id && e.ID == execID {
      return e.ExitStatus, nil
     }
   }
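Note: a condensed sketch of the new wait pattern above — exec processes are now identified by an explicit exec ID and their exit is observed on the shared subscription stream via typeurl. Only types and fields visible in this hunk are used; everything else here is illustration.

package sketch

import (
	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/typeurl"
)

// waitExecExit blocks until a TaskExit event for the given container/exec ID
// arrives on the subscription stream and returns its exit status.
func waitExecExit(stream events.Events_SubscribeClient, containerID, execID string) (uint32, error) {
	for {
		evt, err := stream.Recv()
		if err != nil {
			return 0, err
		}
		// Skip everything that is not a task exit event.
		if !typeurl.Is(evt.Event, &events.TaskExit{}) {
			continue
		}
		any, err := typeurl.UnmarshalAny(evt.Event)
		if err != nil {
			return 0, err
		}
		e := any.(*events.TaskExit)
		// For an exec process, TaskExit.ID carries the exec ID.
		if e.ContainerID == containerID && e.ID == execID {
			return e.ExitStatus, nil
		}
	}
}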
@@ -19,8 +19,7 @@ package server
 import (
   "fmt"
 
-  "github.com/containerd/containerd/api/services/containers"
-  "github.com/containerd/containerd/snapshot"
+  "github.com/containerd/containerd/errdefs"
   "github.com/golang/glog"
   "golang.org/x/net/context"
   "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@@ -71,7 +70,7 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
 
   // Remove container snapshot.
   if err := c.snapshotService.Remove(ctx, id); err != nil {
-    if !snapshot.IsNotExist(err) {
+    if !errdefs.IsNotFound(err) {
       return nil, fmt.Errorf("failed to remove container snapshot %q: %v", id, err)
     }
     glog.V(5).Infof("Remove called for snapshot %q that does not exist", id)
@@ -89,7 +88,7 @@ func (c *criContainerdService) RemoveContainer(ctx context.Context, r *runtime.R
   }
 
   // Delete containerd container.
-  if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
+  if err := c.containerService.Delete(ctx, id); err != nil {
     if !isContainerdGRPCNotFoundError(err) {
       return nil, fmt.Errorf("failed to delete containerd container %q: %v", id, err)
     }
@@ -23,8 +23,8 @@ import (
   "path/filepath"
   "time"
 
-  "github.com/containerd/containerd/api/services/execution"
-  "github.com/containerd/containerd/api/types/mount"
+  "github.com/containerd/containerd/api/services/tasks/v1"
+  "github.com/containerd/containerd/api/types"
   "github.com/containerd/containerd/api/types/task"
   "github.com/golang/glog"
   "golang.org/x/net/context"
@@ -97,7 +97,7 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
   sandboxConfig := sandbox.Config
   sandboxID := meta.SandboxID
   // Make sure sandbox is running.
-  sandboxInfo, err := c.taskService.Info(ctx, &execution.InfoRequest{ContainerID: sandboxID})
+  sandboxInfo, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: sandboxID})
   if err != nil {
     return fmt.Errorf("failed to get sandbox container %q info: %v", sandboxID, err)
   }
@@ -153,9 +153,9 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
   if err != nil {
     return fmt.Errorf("failed to get rootfs mounts %q: %v", id, err)
   }
-  var rootfs []*mount.Mount
+  var rootfs []*types.Mount
   for _, m := range rootfsMounts {
-    rootfs = append(rootfs, &mount.Mount{
+    rootfs = append(rootfs, &types.Mount{
       Type:    m.Type,
       Source:  m.Source,
       Options: m.Options,
@@ -163,7 +163,7 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
   }
 
   // Create containerd task.
-  createOpts := &execution.CreateRequest{
+  createOpts := &tasks.CreateTaskRequest{
     ContainerID: id,
     Rootfs:      rootfs,
     Stdin:       stdin,
@@ -180,14 +180,14 @@ func (c *criContainerdService) startContainer(ctx context.Context, id string, me
   defer func() {
     if retErr != nil {
       // Cleanup the containerd task if an error is returned.
-      if _, err := c.taskService.Delete(ctx, &execution.DeleteRequest{ContainerID: id}); err != nil {
+      if _, err := c.taskService.Delete(ctx, &tasks.DeleteTaskRequest{ContainerID: id}); err != nil {
        glog.Errorf("Failed to delete containerd task %q: %v", id, err)
       }
     }
   }()
 
   // Start containerd task.
-  if _, err := c.taskService.Start(ctx, &execution.StartRequest{ContainerID: id}); err != nil {
+  if _, err := c.taskService.Start(ctx, &tasks.StartTaskRequest{ContainerID: id}); err != nil {
     return fmt.Errorf("failed to start containerd task %q: %v", id, err)
   }
 
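Note: the execution service is replaced by the tasks service throughout, and rootfs mounts now use the consolidated api/types package. A small sketch of the renamed request values this code builds, restricted to the fields that actually appear in the hunks above (nothing else is asserted about the vendored types):

package sketch

import (
	tasks "github.com/containerd/containerd/api/services/tasks/v1"
	"github.com/containerd/containerd/api/types"
)

// newMount mirrors the rootfs mount conversion above.
func newMount(typ, source string, options []string) *types.Mount {
	return &types.Mount{
		Type:    typ,
		Source:  source,
		Options: options,
	}
}

// buildTaskRequests shows the renamed request types used above; the field sets
// are exactly the ones that appear in this diff.
func buildTaskRequests(id string, rootfs []*types.Mount) (*tasks.CreateTaskRequest, *tasks.StartTaskRequest, *tasks.DeleteTaskRequest) {
	create := &tasks.CreateTaskRequest{
		ContainerID: id,
		Rootfs:      rootfs,
	}
	start := &tasks.StartTaskRequest{ContainerID: id}
	del := &tasks.DeleteTaskRequest{ContainerID: id}
	return create, start, del
}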
@@ -20,7 +20,7 @@ import (
   "fmt"
   "time"
 
-  "github.com/containerd/containerd/api/services/execution"
+  "github.com/containerd/containerd/api/services/tasks/v1"
   "github.com/docker/docker/pkg/signal"
   "github.com/golang/glog"
   "golang.org/x/net/context"
@@ -94,10 +94,10 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
     }
   }
   glog.V(2).Infof("Stop container %q with signal %v", id, stopSignal)
-  _, err = c.taskService.Kill(ctx, &execution.KillRequest{
+  _, err = c.taskService.Kill(ctx, &tasks.KillRequest{
     ContainerID: id,
     Signal:      uint32(stopSignal),
-    PidOrAll:    &execution.KillRequest_All{All: true},
+    All:         true,
   })
   if err != nil {
     if !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
@@ -115,10 +115,10 @@ func (c *criContainerdService) stopContainer(ctx context.Context, container cont
 
   // Event handler will Delete the container from containerd after it handles the Exited event.
   glog.V(2).Infof("Kill container %q", id)
-  _, err := c.taskService.Kill(ctx, &execution.KillRequest{
+  _, err := c.taskService.Kill(ctx, &tasks.KillRequest{
     ContainerID: id,
     Signal:      uint32(unix.SIGKILL),
-    PidOrAll:    &execution.KillRequest_All{All: true},
+    All:         true,
   })
   if err != nil {
     if !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
@@ -19,8 +19,9 @@ package server
 import (
   "time"
 
-  "github.com/containerd/containerd/api/services/execution"
-  "github.com/containerd/containerd/api/types/task"
+  "github.com/containerd/containerd/api/services/events/v1"
+  "github.com/containerd/containerd/api/services/tasks/v1"
+  "github.com/containerd/containerd/typeurl"
   "github.com/golang/glog"
   "github.com/jpillora/backoff"
   "golang.org/x/net/context"
@@ -48,7 +49,7 @@ func (c *criContainerdService) startEventMonitor() {
   }
   go func() {
     for {
-      events, err := c.taskService.Events(context.Background(), &execution.EventsRequest{})
+      eventstream, err := c.eventService.Subscribe(context.Background(), &events.SubscribeRequest{})
       if err != nil {
         glog.Errorf("Failed to connect to containerd event stream: %v", err)
         time.Sleep(b.Duration())
@@ -59,7 +60,7 @@ func (c *criContainerdService) startEventMonitor() {
       // TODO(random-liu): Relist to recover state, should prevent other operations
       // until state is fully recovered.
       for {
-        if err := c.handleEventStream(events); err != nil {
+        if err := c.handleEventStream(eventstream); err != nil {
           glog.Errorf("Failed to handle event stream: %v", err)
           break
         }
@@ -69,27 +70,34 @@ func (c *criContainerdService) startEventMonitor() {
 }
 
 // handleEventStream receives an event from containerd and handles the event.
-func (c *criContainerdService) handleEventStream(events execution.Tasks_EventsClient) error {
-  e, err := events.Recv()
+func (c *criContainerdService) handleEventStream(eventstream events.Events_SubscribeClient) error {
+  e, err := eventstream.Recv()
   if err != nil {
     return err
   }
-  glog.V(2).Infof("Received container event: %+v", e)
+  glog.V(4).Infof("Received container event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic)
   c.handleEvent(e)
   return nil
 }
 
 // handleEvent handles a containerd event.
-func (c *criContainerdService) handleEvent(e *task.Event) {
-  switch e.Type {
+func (c *criContainerdService) handleEvent(evt *events.Envelope) {
+  any, err := typeurl.UnmarshalAny(evt.Event)
+  if err != nil {
+    glog.Errorf("Failed to convert event envelope %+v: %v", evt, err)
+    return
+  }
+  switch any.(type) {
   // If containerd-shim exits unexpectedly, there will be no corresponding event.
   // However, containerd could not retrieve container state in that case, so it's
   // fine to leave out that case for now.
   // TODO(random-liu): [P2] Handle containerd-shim exit.
-  case task.Event_EXIT:
-    cntr, err := c.containerStore.Get(e.ID)
+  case *events.TaskExit:
+    e := any.(*events.TaskExit)
+    glog.V(2).Infof("TaskExit event %+v", e)
+    cntr, err := c.containerStore.Get(e.ContainerID)
     if err != nil {
-      glog.Errorf("Failed to get container %q: %v", e.ID, err)
+      glog.Errorf("Failed to get container %q: %v", e.ContainerID, err)
       return
     }
     if e.Pid != cntr.Status.Get().Pid {
@@ -97,10 +105,11 @@ func (c *criContainerdService) handleEvent(e *task.Event) {
       return
     }
     // Delete the container from containerd.
-    _, err = c.taskService.Delete(context.Background(), &execution.DeleteRequest{ContainerID: e.ID})
+    _, err = c.taskService.Delete(context.Background(), &tasks.DeleteTaskRequest{ContainerID: e.ContainerID})
+    // TODO(random-liu): Change isContainerdGRPCNotFoundError to use errdefs.
     if err != nil && !isContainerdGRPCNotFoundError(err) {
       // TODO(random-liu): [P0] Enqueue the event and retry.
-      glog.Errorf("Failed to delete container %q: %v", e.ID, err)
+      glog.Errorf("Failed to delete container %q: %v", e.ContainerID, err)
       return
     }
     err = cntr.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
@@ -115,21 +124,23 @@ func (c *criContainerdService) handleEvent(e *task.Event) {
       return status, nil
     })
     if err != nil {
-      glog.Errorf("Failed to update container %q state: %v", e.ID, err)
+      glog.Errorf("Failed to update container %q state: %v", e.ContainerID, err)
       // TODO(random-liu): [P0] Enqueue the event and retry.
       return
     }
-  case task.Event_OOM:
-    cntr, err := c.containerStore.Get(e.ID)
+  case *events.TaskOOM:
+    e := any.(*events.TaskOOM)
+    glog.V(2).Infof("TaskOOM event %+v", e)
+    cntr, err := c.containerStore.Get(e.ContainerID)
     if err != nil {
-      glog.Errorf("Failed to get container %q: %v", e.ID, err)
+      glog.Errorf("Failed to get container %q: %v", e.ContainerID, err)
     }
     err = cntr.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
       status.Reason = oomExitReason
       return status, nil
     })
    if err != nil {
-      glog.Errorf("Failed to update container %q oom: %v", e.ID, err)
+      glog.Errorf("Failed to update container %q oom: %v", e.ContainerID, err)
      return
    }
  }
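Note: event handling now receives events.Envelope values from Subscribe and dispatches on the decoded event type via typeurl, instead of switching on task.Event.Type. A minimal sketch of that dispatch, using only the event types and fields visible in this hunk (the handler bodies are placeholders):

package sketch

import (
	"log"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/typeurl"
)

// dispatchEvent decodes an envelope and branches on the concrete event type,
// mirroring handleEvent above.
func dispatchEvent(evt *events.Envelope) {
	any, err := typeurl.UnmarshalAny(evt.Event)
	if err != nil {
		log.Printf("failed to convert event envelope %+v: %v", evt, err)
		return
	}
	switch e := any.(type) {
	case *events.TaskExit:
		// ContainerID, ID, Pid and ExitStatus identify the exited process.
		log.Printf("task exit: container %q pid %d status %d", e.ContainerID, e.Pid, e.ExitStatus)
	case *events.TaskOOM:
		// ContainerID identifies the container that hit its memory limit.
		log.Printf("task OOM: container %q", e.ContainerID)
	}
}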
@@ -25,7 +25,7 @@ import (
   "strings"
   "syscall"
 
-  containerdmetadata "github.com/containerd/containerd/metadata"
+  "github.com/containerd/containerd/errdefs"
   "github.com/docker/distribution/reference"
   "github.com/docker/docker/pkg/stringid"
   imagedigest "github.com/opencontainers/go-digest"
@@ -69,7 +69,7 @@ const (
   relativeRootfsPath = "rootfs"
   // defaultRuntime is the runtime to use in containerd. We may support
   // other runtime in the future.
-  defaultRuntime = "linux"
+  defaultRuntime = "io.containerd.runtime.v1.linux"
   // sandboxesDir contains all sandbox root. A sandbox root is the running
   // directory of the sandbox, all files created for the sandbox will be
   // placed under this directory.
@@ -339,7 +339,7 @@ func (c *criContainerdService) localResolve(ctx context.Context, ref string) (*i
   }
   imageInContainerd, err := c.imageStoreService.Get(ctx, normalized.String())
   if err != nil {
-    if containerdmetadata.IsNotFound(err) {
+    if errdefs.IsNotFound(err) {
       return nil, nil
     }
     return nil, fmt.Errorf("an error occurred when getting image %q from containerd image store: %v",
@@ -27,6 +27,7 @@ import (
   "time"
 
   "github.com/containerd/containerd/content"
+  "github.com/containerd/containerd/errdefs"
   containerdimages "github.com/containerd/containerd/images"
   "github.com/containerd/containerd/remotes"
   "github.com/containerd/containerd/remotes/docker"
@@ -279,9 +280,8 @@ func (c *criContainerdService) pullImage(ctx context.Context, rawRef string, aut
     if r == "" {
       continue
     }
-    if err := c.imageStoreService.Put(ctx, r, desc); err != nil {
-      return "", "", "", fmt.Errorf("failed to put image reference %q desc %v into containerd image store: %v",
-        r, desc, err)
+    if err := c.createImageReference(ctx, r, desc); err != nil {
+      return "", "", "", fmt.Errorf("failed to update image reference %q: %v", r, err)
     }
   }
   // Do not cleanup if following operations fail so as to make resumable download possible.
@@ -331,13 +331,34 @@ func (c *criContainerdService) pullImage(ctx context.Context, rawRef string, aut
   // Use config digest as imageID to conform to oci image spec, and also add image id as
   // image reference.
   imageID := configDesc.Digest.String()
-  if err := c.imageStoreService.Put(ctx, imageID, desc); err != nil {
-    return "", "", "", fmt.Errorf("failed to put image id %q into containerd image store: %v",
-      imageID, err)
+  if err := c.createImageReference(ctx, imageID, desc); err != nil {
+    return "", "", "", fmt.Errorf("failed to update image id %q: %v", imageID, err)
   }
   return imageID, repoTag, repoDigest, nil
 }
 
+// createImageReference creates image reference inside containerd image store.
+// Note that because create and update are not finished in one transaction, there could be race. E.g.
+// the image reference is deleted by someone else after create returns already exists, but before update
+// happens.
+func (c *criContainerdService) createImageReference(ctx context.Context, name string, desc imagespec.Descriptor) error {
+  img := containerdimages.Image{
+    Name:   name,
+    Target: desc,
+  }
+  // TODO(random-liu): Figure out which is the more performant sequence create then update or
+  // update then create.
+  _, err := c.imageStoreService.Create(ctx, img)
+  if err == nil {
+    return nil
+  }
+  if err != nil && !errdefs.IsAlreadyExists(err) {
+    return err
+  }
+  _, err = c.imageStoreService.Update(ctx, img, "target")
+  return err
+}
+
 // waitDownloadingPollInterval is the interval to check resource downloading progress.
 const waitDownloadingPollInterval = 200 * time.Millisecond
 
@@ -350,7 +371,7 @@ func (c *criContainerdService) waitForResourcesDownloading(ctx context.Context,
     case <-ticker.C:
       // TODO(random-liu): Use better regexp when containerd `MakeRefKey` contains more
       // information.
-      statuses, err := c.contentStoreService.Status(ctx, "")
+      statuses, err := c.contentStoreService.ListStatuses(ctx, "")
       if err != nil {
         return fmt.Errorf("failed to get content status: %v", err)
       }
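Note: image references are now recorded with a create-then-update sequence classified through the shared errdefs helpers rather than store-specific not-found checks. A sketch of the same pattern against an assumed, narrowed image-store interface (the interface shape and return types are assumptions for illustration; only the calls and error helpers used in createImageReference above are relied on):

package sketch

import (
	"context"

	"github.com/containerd/containerd/errdefs"
	containerdimages "github.com/containerd/containerd/images"
	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
)

// imageStore is an assumed narrowing of the vendored image store client,
// limited to the calls used in createImageReference above.
type imageStore interface {
	Create(ctx context.Context, img containerdimages.Image) (containerdimages.Image, error)
	Update(ctx context.Context, img containerdimages.Image, fieldpaths ...string) (containerdimages.Image, error)
}

// createOrUpdateReference points name at desc, tolerating a pre-existing record.
func createOrUpdateReference(ctx context.Context, store imageStore, name string, desc imagespec.Descriptor) error {
	img := containerdimages.Image{Name: name, Target: desc}
	if _, err := store.Create(ctx, img); err == nil {
		return nil
	} else if !errdefs.IsAlreadyExists(err) {
		return err
	}
	// The record already exists; update just its target descriptor.
	_, err := store.Update(ctx, img, "target")
	return err
}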
@@ -19,7 +19,7 @@ package server
 import (
   "fmt"
 
-  containerdmetadata "github.com/containerd/containerd/metadata"
+  "github.com/containerd/containerd/errdefs"
   "github.com/golang/glog"
   "golang.org/x/net/context"
   "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@@ -51,8 +51,10 @@ func (c *criContainerdService) RemoveImage(ctx context.Context, r *runtime.Remov
   for _, ref := range append(append(image.RepoTags, image.RepoDigests...), image.ID) {
     // TODO(random-liu): Containerd should schedule a garbage collection immediately,
     // and we may want to wait for the garbage collection to be over here.
+    // TODO(random-liu): Should check whether descriptor is as expected before delete,
+    // so as to avoid deleting new reference because of staled reference.
     err = c.imageStoreService.Delete(ctx, ref)
-    if err == nil || containerdmetadata.IsNotFound(err) {
+    if err == nil || errdefs.IsNotFound(err) {
       continue
     }
     return nil, fmt.Errorf("failed to delete image reference %q for image %q: %v", ref, image.ID, err)
@@ -22,7 +22,7 @@ import (
   "github.com/golang/glog"
   "golang.org/x/net/context"
 
-  "github.com/containerd/containerd/api/services/execution"
+  "github.com/containerd/containerd/api/services/tasks/v1"
 
   "github.com/containerd/containerd/api/types/task"
   "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@@ -42,7 +42,7 @@ func (c *criContainerdService) ListPodSandbox(ctx context.Context, r *runtime.Li
   // List all sandboxes from store.
   sandboxesInStore := c.sandboxStore.List()
 
-  resp, err := c.taskService.List(ctx, &execution.ListRequest{})
+  resp, err := c.taskService.List(ctx, &tasks.ListTasksRequest{})
   if err != nil {
     return nil, fmt.Errorf("failed to list sandbox containers: %v", err)
   }
@@ -19,9 +19,8 @@ package server
 import (
   "fmt"
 
-  "github.com/containerd/containerd/api/services/containers"
-  "github.com/containerd/containerd/api/services/execution"
-  "github.com/containerd/containerd/snapshot"
+  "github.com/containerd/containerd/api/services/tasks/v1"
+  "github.com/containerd/containerd/errdefs"
   "github.com/golang/glog"
   "golang.org/x/net/context"
   "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@@ -55,7 +54,7 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
 
   // Return error if sandbox container is not fully stopped.
   // TODO(random-liu): [P0] Make sure network is torn down, may need to introduce a state.
-  _, err = c.taskService.Info(ctx, &execution.InfoRequest{ContainerID: id})
+  _, err = c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
   if err != nil && !isContainerdGRPCNotFoundError(err) {
     return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
   }
@@ -65,7 +64,7 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
 
   // Remove sandbox container snapshot.
   if err := c.snapshotService.Remove(ctx, id); err != nil {
-    if !snapshot.IsNotExist(err) {
+    if !errdefs.IsNotFound(err) {
       return nil, fmt.Errorf("failed to remove sandbox container snapshot %q: %v", id, err)
     }
     glog.V(5).Infof("Remove called for snapshot %q that does not exist", id)
@@ -97,7 +96,7 @@ func (c *criContainerdService) RemovePodSandbox(ctx context.Context, r *runtime.
   }
 
   // Delete sandbox container.
-  if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
+  if err := c.containerService.Delete(ctx, id); err != nil {
     if !isContainerdGRPCNotFoundError(err) {
       return nil, fmt.Errorf("failed to delete sandbox container %q: %v", id, err)
     }
@@ -23,9 +23,9 @@ import (
     "strings"
     "time"
 
-    "github.com/containerd/containerd/api/services/containers"
-    "github.com/containerd/containerd/api/services/execution"
-    "github.com/containerd/containerd/api/types/mount"
+    "github.com/containerd/containerd/api/services/tasks/v1"
+    "github.com/containerd/containerd/api/types"
+    "github.com/containerd/containerd/containers"
     prototypes "github.com/gogo/protobuf/types"
     "github.com/golang/glog"
     imagespec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -90,9 +90,9 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
             }
         }
     }()
-    var rootfs []*mount.Mount
+    var rootfs []*types.Mount
     for _, m := range rootfsMounts {
-        rootfs = append(rootfs, &mount.Mount{
+        rootfs = append(rootfs, &types.Mount{
             Type:    m.Type,
             Source:  m.Source,
             Options: m.Options,

@@ -109,24 +109,22 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
         return nil, fmt.Errorf("failed to marshal oci spec %+v: %v", spec, err)
     }
     glog.V(4).Infof("Sandbox container spec: %+v", spec)
-    if _, err = c.containerService.Create(ctx, &containers.CreateContainerRequest{
-        Container: containers.Container{
+    if _, err = c.containerService.Create(ctx, containers.Container{
         ID: id,
         // TODO(random-liu): Checkpoint metadata into container labels.
         Image:   image.ID,
-        Runtime: defaultRuntime,
+        Runtime: containers.RuntimeInfo{Name: defaultRuntime},
         Spec: &prototypes.Any{
             TypeUrl: runtimespec.Version,
             Value:   rawSpec,
         },
         RootFS: id,
-        },
     }); err != nil {
         return nil, fmt.Errorf("failed to create containerd container: %v", err)
     }
     defer func() {
         if retErr != nil {
-            if _, err := c.containerService.Delete(ctx, &containers.DeleteContainerRequest{ID: id}); err != nil {
+            if err := c.containerService.Delete(ctx, id); err != nil {
                 glog.Errorf("Failed to delete containerd container%q: %v", id, err)
             }
         }

@@ -181,7 +179,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
         }
     }()
 
-    createOpts := &execution.CreateRequest{
+    createOpts := &tasks.CreateTaskRequest{
         ContainerID: id,
         Rootfs:      rootfs,
         // No stdin for sandbox container.

@@ -199,9 +197,8 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
     defer func() {
         if retErr != nil {
             // Cleanup the sandbox container if an error is returned.
-            if _, err = c.taskService.Delete(ctx, &execution.DeleteRequest{ContainerID: id}); err != nil {
-                glog.Errorf("Failed to delete sandbox container %q: %v",
-                    id, err)
+            if err := c.stopSandboxContainer(ctx, id); err != nil {
+                glog.Errorf("Failed to delete sandbox container %q: %v", id, err)
             }
         }
     }()

@@ -226,7 +223,7 @@ func (c *criContainerdService) RunPodSandbox(ctx context.Context, r *runtime.Run
     }
 
     // Start sandbox container in containerd.
-    if _, err := c.taskService.Start(ctx, &execution.StartRequest{ContainerID: id}); err != nil {
+    if _, err := c.taskService.Start(ctx, &tasks.StartTaskRequest{ContainerID: id}); err != nil {
         return nil, fmt.Errorf("failed to start sandbox container %q: %v",
             id, err)
     }

@@ -314,7 +311,7 @@ func (c *criContainerdService) generateSandboxContainerSpec(id string, config *r
     // TODO(random-liu): [P2] Set apparmor and seccomp from annotations.
 
     g.SetLinuxResourcesCPUShares(uint64(defaultSandboxCPUshares))
-    g.SetLinuxResourcesOOMScoreAdj(int(defaultSandboxOOMAdj))
+    g.SetProcessOOMScoreAdj(int(defaultSandboxOOMAdj))
 
     return g.Spec(), nil
 }

@@ -60,7 +60,7 @@ func getRunPodSandboxTestData() (*runtime.PodSandboxConfig, *imagespec.ImageConf
         assert.Equal(t, []string{"/pause", "forever"}, spec.Process.Args)
         assert.Equal(t, "/workspace", spec.Process.Cwd)
         assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, defaultSandboxCPUshares)
-        assert.EqualValues(t, *spec.Linux.Resources.OOMScoreAdj, defaultSandboxOOMAdj)
+        assert.EqualValues(t, *spec.Process.OOMScoreAdj, defaultSandboxOOMAdj)
     }
     return config, imageConfig, specCheck
 }
@@ -22,7 +22,7 @@ import (
     "github.com/golang/glog"
     "golang.org/x/net/context"
 
-    "github.com/containerd/containerd/api/services/execution"
+    "github.com/containerd/containerd/api/services/tasks/v1"
     "github.com/containerd/containerd/api/types/task"
 
     "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"

@@ -47,7 +47,7 @@ func (c *criContainerdService) PodSandboxStatus(ctx context.Context, r *runtime.
     // Use the full sandbox id.
     id := sandbox.ID
 
-    info, err := c.taskService.Info(ctx, &execution.InfoRequest{ContainerID: id})
+    info, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
     if err != nil && !isContainerdGRPCNotFoundError(err) {
         return nil, fmt.Errorf("failed to get sandbox container info for %q: %v", id, err)
     }
@@ -20,11 +20,13 @@ import (
     "fmt"
     "os"
 
+    "github.com/containerd/containerd/api/services/events/v1"
+    "github.com/containerd/containerd/api/services/tasks/v1"
+    "github.com/containerd/containerd/api/types/task"
+    "github.com/containerd/containerd/typeurl"
     "github.com/golang/glog"
     "golang.org/x/net/context"
-
-    "github.com/containerd/containerd/api/services/execution"
+    "golang.org/x/sys/unix"
 
     "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 

@@ -77,16 +79,73 @@ func (c *criContainerdService) StopPodSandbox(ctx context.Context, r *runtime.St
     glog.V(2).Infof("TearDown network for sandbox %q successfully", id)
 
     sandboxRoot := getSandboxRootDir(c.rootDir, id)
-    if err = c.unmountSandboxFiles(sandboxRoot, sandbox.Config); err != nil {
+    if err := c.unmountSandboxFiles(sandboxRoot, sandbox.Config); err != nil {
         return nil, fmt.Errorf("failed to unmount sandbox files in %q: %v", sandboxRoot, err)
     }
 
-    // TODO(random-liu): [P1] Handle sandbox container graceful deletion.
-    // Delete the sandbox container from containerd.
-    _, err = c.taskService.Delete(ctx, &execution.DeleteRequest{ContainerID: id})
-    if err != nil && !isContainerdGRPCNotFoundError(err) {
-        return nil, fmt.Errorf("failed to delete sandbox container %q: %v", id, err)
+    if err := c.stopSandboxContainer(ctx, id); err != nil {
+        return nil, fmt.Errorf("failed to stop sandbox container %q: %v", id, err)
     }
 
     return &runtime.StopPodSandboxResponse{}, nil
 }
 
+// stopSandboxContainer kills and deletes sandbox container.
+func (c *criContainerdService) stopSandboxContainer(ctx context.Context, id string) error {
+    cancellable, cancel := context.WithCancel(ctx)
+    eventstream, err := c.eventService.Subscribe(cancellable, &events.SubscribeRequest{})
+    if err != nil {
+        return fmt.Errorf("failed to get containerd event: %v", err)
+    }
+    defer cancel()
+
+    resp, err := c.taskService.Get(ctx, &tasks.GetTaskRequest{ContainerID: id})
+    if err != nil {
+        if isContainerdGRPCNotFoundError(err) {
+            return nil
+        }
+        return fmt.Errorf("failed to get sandbox container: %v", err)
+    }
+    if resp.Task.Status != task.StatusStopped {
+        // TODO(random-liu): [P1] Handle sandbox container graceful deletion.
+        if _, err := c.taskService.Kill(ctx, &tasks.KillRequest{
+            ContainerID: id,
+            Signal:      uint32(unix.SIGKILL),
+            All:         true,
+        }); err != nil && !isContainerdGRPCNotFoundError(err) && !isRuncProcessAlreadyFinishedError(err) {
+            return fmt.Errorf("failed to kill sandbox container: %v", err)
+        }
+
+        if err := c.waitSandboxContainer(eventstream, id, resp.Task.Pid); err != nil {
+            return fmt.Errorf("failed to wait for pod sandbox to stop: %v", err)
+        }
+    }
+
+    // Delete the sandbox container from containerd.
+    _, err = c.taskService.Delete(ctx, &tasks.DeleteTaskRequest{ContainerID: id})
+    if err != nil && !isContainerdGRPCNotFoundError(err) {
+        return fmt.Errorf("failed to delete sandbox container: %v", err)
+    }
+    return nil
+}
+
+// waitSandboxContainer wait sandbox container stop event.
+func (c *criContainerdService) waitSandboxContainer(eventstream events.Events_SubscribeClient, id string, pid uint32) error {
+    for {
+        evt, err := eventstream.Recv()
+        if err != nil {
+            return err
+        }
+        // Continue until the event received is of type task exit.
+        if !typeurl.Is(evt.Event, &events.TaskExit{}) {
+            continue
+        }
+        any, err := typeurl.UnmarshalAny(evt.Event)
+        if err != nil {
+            return err
+        }
+        e := any.(*events.TaskExit)
+        if e.ContainerID == id && e.Pid == pid {
+            return nil
+        }
+    }
+}
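Aside: a hedged sketch of the kill-then-wait pattern the new stopSandboxContainer/waitSandboxContainer functions implement — subscribe to the events service before sending SIGKILL so the TaskExit event cannot be missed. Every call mirrors one visible in the hunk; the client parameters and function name are placeholders, not APIs of this repository.

```go
// Sketch, assuming the events and tasks clients shown in the diff above.
package main

import (
	"context"

	"github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/api/services/tasks/v1"
	"github.com/containerd/containerd/typeurl"
	"golang.org/x/sys/unix"
)

func killAndWait(ctx context.Context, eventsClient events.EventsClient, tasksClient tasks.TasksClient, id string, pid uint32) error {
	// Subscribe before killing so the exit event cannot slip past us.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	stream, err := eventsClient.Subscribe(subCtx, &events.SubscribeRequest{})
	if err != nil {
		return err
	}
	// Kill the whole task (All: true), as the hunk does for the sandbox.
	if _, err := tasksClient.Kill(ctx, &tasks.KillRequest{
		ContainerID: id,
		Signal:      uint32(unix.SIGKILL),
		All:         true,
	}); err != nil {
		return err
	}
	// Block until a TaskExit event for this container/pid arrives.
	for {
		evt, err := stream.Recv()
		if err != nil {
			return err
		}
		if !typeurl.Is(evt.Event, &events.TaskExit{}) {
			continue
		}
		out, err := typeurl.UnmarshalAny(evt.Event)
		if err != nil {
			return err
		}
		if e := out.(*events.TaskExit); e.ContainerID == id && e.Pid == pid {
			return nil
		}
	}
}
```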
@@ -20,9 +20,10 @@ import (
     "fmt"
 
     "github.com/containerd/containerd"
-    "github.com/containerd/containerd/api/services/containers"
-    "github.com/containerd/containerd/api/services/execution"
-    versionapi "github.com/containerd/containerd/api/services/version"
+    "github.com/containerd/containerd/api/services/events/v1"
+    "github.com/containerd/containerd/api/services/tasks/v1"
+    versionapi "github.com/containerd/containerd/api/services/version/v1"
+    "github.com/containerd/containerd/containers"
     "github.com/containerd/containerd/content"
     "github.com/containerd/containerd/images"
     diffservice "github.com/containerd/containerd/services/diff"

@@ -71,9 +72,9 @@ type criContainerdService struct {
     // imageStore stores all resources associated with images.
     imageStore *imagestore.Store
     // containerService is containerd containers client.
-    containerService containers.ContainersClient
+    containerService containers.Store
     // taskService is containerd tasks client.
-    taskService execution.TasksClient
+    taskService tasks.TasksClient
     // contentStoreService is the containerd content service client.
     contentStoreService content.Store
     // snapshotService is the containerd snapshot service client.

@@ -93,6 +94,8 @@ type criContainerdService struct {
     agentFactory agents.AgentFactory
     // client is an instance of the containerd client
    client *containerd.Client
+    // eventsService is the containerd task service client
+    eventService events.EventsClient
 }
 
 // NewCRIContainerdService returns a new instance of CRIContainerdService

@@ -117,12 +120,14 @@ func NewCRIContainerdService(containerdEndpoint, rootDir, networkPluginBinDir, n
         taskService:         client.TaskService(),
         imageStoreService:   client.ImageService(),
         contentStoreService: client.ContentStore(),
-        snapshotService:     client.SnapshotService(),
+        // Use daemon default snapshotter.
+        snapshotService: client.SnapshotService(""),
         diffService:         client.DiffService(),
         versionService:      client.VersionService(),
         healthService:       client.HealthService(),
         agentFactory:        agents.NewAgentFactory(),
         client:              client,
+        eventService:        client.EventService(),
     }
 
     netPlugin, err := ocicni.InitCNI(networkPluginBinDir, networkPluginConfDir)
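Aside: a hedged sketch of the client wiring this constructor hunk depends on. Each accessor mirrors a call visible in the diff; the empty string passed to SnapshotService selects the daemon's default snapshotter. The socket path is only the conventional default, used here for illustration.

```go
// Sketch of obtaining the per-service clients from one containerd client.
package main

import "github.com/containerd/containerd"

func main() {
	// Connect to the containerd daemon; the socket path is illustrative.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	_ = client.TaskService()       // tasks (v1) API client
	_ = client.EventService()      // events (v1) API client
	_ = client.SnapshotService("") // "" = daemon default snapshotter
}
```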
@@ -20,7 +20,7 @@ limitations under the License.
 package testing
 
 import (
-    version "github.com/containerd/containerd/api/services/version"
+    version "github.com/containerd/containerd/api/services/version/v1"
     gomock "github.com/golang/mock/gomock"
     empty "github.com/golang/protobuf/ptypes/empty"
     context "golang.org/x/net/context"

@@ -20,7 +20,7 @@ import (
     "errors"
     "testing"
 
-    versionapi "github.com/containerd/containerd/api/services/version"
+    versionapi "github.com/containerd/containerd/api/services/version/v1"
     "github.com/golang/mock/gomock"
     "github.com/golang/protobuf/ptypes/empty"
     "github.com/stretchr/testify/assert"
vendor/github.com/Microsoft/go-winio/file.go (generated, vendored) | 21
@@ -23,6 +23,13 @@ type atomicBool int32
 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
 func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
 func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
+func (b *atomicBool) swap(new bool) bool {
+    var newInt int32
+    if new {
+        newInt = 1
+    }
+    return atomic.SwapInt32((*int32)(b), newInt) == 1
+}
 
 const (
     cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1

@@ -71,7 +78,7 @@ func initIo() {
 type win32File struct {
     handle        syscall.Handle
     wg            sync.WaitGroup
-    closing       bool
+    closing       atomicBool
     readDeadline  deadlineHandler
     writeDeadline deadlineHandler
 }

@@ -107,9 +114,9 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
 
 // closeHandle closes the resources associated with a Win32 handle
 func (f *win32File) closeHandle() {
-    if !f.closing {
+    // Atomically set that we are closing, releasing the resources only once.
+    if !f.closing.swap(true) {
         // cancel all IO and wait for it to complete
-        f.closing = true
         cancelIoEx(f.handle, nil)
         f.wg.Wait()
         // at this point, no new IO can start

@@ -127,10 +134,10 @@ func (f *win32File) Close() error {
 // prepareIo prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
 func (f *win32File) prepareIo() (*ioOperation, error) {
-    f.wg.Add(1)
-    if f.closing {
+    if f.closing.isSet() {
         return nil, ErrFileClosed
     }
+    f.wg.Add(1)
     c := &ioOperation{}
     c.ch = make(chan ioResult)
     return c, nil

@@ -159,7 +166,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
         return int(bytes), err
     }
 
-    if f.closing {
+    if f.closing.isSet() {
         cancelIoEx(f.handle, &c.o)
     }
 

@@ -175,7 +182,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
     case r = <-c.ch:
         err = r.err
         if err == syscall.ERROR_OPERATION_ABORTED {
-            if f.closing {
+            if f.closing.isSet() {
                 err = ErrFileClosed
             }
         }
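Aside: the vendored go-winio change above replaces a plain bool with an atomicBool and an atomic swap, so the handle cleanup runs exactly once even when several goroutines race to close. A minimal, self-contained sketch of that close-once idiom using only the standard library (the closer type is illustrative, not from go-winio):

```go
// Close-once sketch: an atomic swap guarantees exactly one caller enters the
// cleanup branch, regardless of how many goroutines call close concurrently.
package main

import (
	"fmt"
	"sync/atomic"
)

type closer struct {
	closing int32
}

func (c *closer) close() {
	// SwapInt32 returns the previous value; only the first caller sees 0.
	if atomic.SwapInt32(&c.closing, 1) == 0 {
		fmt.Println("releasing resources once")
	}
}

func main() {
	c := &closer{}
	c.close() // releases
	c.close() // no-op: flag already set
}
```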
vendor/github.com/Microsoft/go-winio/pipe.go (generated, vendored) | 15
@@ -13,19 +13,12 @@ import (
 )
 
 //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
-//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
-//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
+//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
 //sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
 //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
 //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
 //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
-//sys copyMemory(dst uintptr, src uintptr, length uint32) = RtlCopyMemory
-
-type securityAttributes struct {
-    Length             uint32
-    SecurityDescriptor uintptr
-    InheritHandle      uint32
-}
 
 const (
     cERROR_PIPE_BUSY = syscall.Errno(231)

@@ -233,13 +226,13 @@ func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig,
         mode |= cPIPE_TYPE_MESSAGE
     }
 
-    sa := &securityAttributes{}
+    sa := &syscall.SecurityAttributes{}
     sa.Length = uint32(unsafe.Sizeof(*sa))
     if securityDescriptor != nil {
         len := uint32(len(securityDescriptor))
         sa.SecurityDescriptor = localAlloc(0, len)
         defer localFree(sa.SecurityDescriptor)
-        copyMemory(sa.SecurityDescriptor, uintptr(unsafe.Pointer(&securityDescriptor[0])), len)
+        copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
     }
     h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
     if err != nil {
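Aside: the vendored pipe.go change drops the RtlCopyMemory syscall in favour of copying through a byte-slice view of the destination pointer. A small sketch of that pattern under the same assumption the vendored code makes — the caller guarantees the destination memory is large enough; the function and buffer names here are illustrative only.

```go
// Copy bytes into memory referenced by an unsafe.Pointer via a slice view.
package main

import (
	"fmt"
	"unsafe"
)

func copyToPointer(dst unsafe.Pointer, src []byte) {
	// View the destination as a large byte array and copy within bounds the
	// caller vouches for; only len(src) bytes are touched.
	copy((*[0xffff]byte)(dst)[:len(src)], src)
}

func main() {
	buf := make([]byte, 8)
	copyToPointer(unsafe.Pointer(&buf[0]), []byte("winio"))
	fmt.Printf("%s\n", buf[:5]) // winio
}
```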
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go (generated, vendored) | 14
@@ -53,7 +53,6 @@ var (
     procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
     procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
     procLocalAlloc = modkernel32.NewProc("LocalAlloc")
-    procRtlCopyMemory = modkernel32.NewProc("RtlCopyMemory")
     procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
     procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
     procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")

@@ -141,7 +140,7 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
     return
 }
 
-func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
     var _p0 *uint16
     _p0, err = syscall.UTF16PtrFromString(name)
     if err != nil {

@@ -150,7 +149,7 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
     return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
 }
 
-func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
     r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
     handle = syscall.Handle(r0)
     if handle == syscall.InvalidHandle {

@@ -163,7 +162,7 @@ func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances
     return
 }
 
-func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
     var _p0 *uint16
     _p0, err = syscall.UTF16PtrFromString(name)
     if err != nil {

@@ -172,7 +171,7 @@ func createFile(name string, access uint32, mode uint32, sa *securityAttributes,
     return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
 }
 
-func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
+func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
     r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
     handle = syscall.Handle(r0)
     if handle == syscall.InvalidHandle {

@@ -236,11 +235,6 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
     return
 }
 
-func copyMemory(dst uintptr, src uintptr, length uint32) {
-    syscall.Syscall(procRtlCopyMemory.Addr(), 3, uintptr(dst), uintptr(src), uintptr(length))
-    return
-}
-
 func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
     var _p0 *uint16
     _p0, err = syscall.UTF16PtrFromString(accountName)
vendor/github.com/Sirupsen/logrus/.travis.yml (generated, vendored) | 10

@@ -1,10 +0,0 @@
-language: go
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go get -t ./...
-script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
vendor/github.com/Sirupsen/logrus/json_formatter.go (generated, vendored) | 41

@@ -1,41 +0,0 @@
-package logrus
-
-import (
-    "encoding/json"
-    "fmt"
-)
-
-type JSONFormatter struct {
-    // TimestampFormat sets the format used for marshaling timestamps.
-    TimestampFormat string
-}
-
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
-    data := make(Fields, len(entry.Data)+3)
-    for k, v := range entry.Data {
-        switch v := v.(type) {
-        case error:
-            // Otherwise errors are ignored by `encoding/json`
-            // https://github.com/Sirupsen/logrus/issues/137
-            data[k] = v.Error()
-        default:
-            data[k] = v
-        }
-    }
-    prefixFieldClashes(data)
-
-    timestampFormat := f.TimestampFormat
-    if timestampFormat == "" {
-        timestampFormat = DefaultTimestampFormat
-    }
-
-    data["time"] = entry.Time.Format(timestampFormat)
-    data["msg"] = entry.Message
-    data["level"] = entry.Level.String()
-
-    serialized, err := json.Marshal(data)
-    if err != nil {
-        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
-    }
-    return append(serialized, '\n'), nil
-}
vendor/github.com/Sirupsen/logrus/terminal_solaris.go (generated, vendored) | 15

@@ -1,15 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
-    "os"
-
-    "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
-    _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
-    return err == nil
-}
vendor/github.com/Sirupsen/logrus/terminal_windows.go (generated, vendored) | 27

@@ -1,27 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
-    "syscall"
-    "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-    procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-    fd := syscall.Stderr
-    var st uint32
-    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
-    return r != 0 && e == 0
-}
vendor/github.com/containerd/containerd/.appveyor.yml (generated, vendored, new file) | 37

@@ -0,0 +1,37 @@
+version: "{build}"
+
+image: Visual Studio 2017
+
+clone_folder: c:\gopath\src\github.com\containerd\containerd
+
+branches:
+  only:
+    - master
+
+environment:
+  GOPATH: C:\gopath
+  CGO_ENABLED: 1
+
+before_build:
+  - choco install -y mingw
+  # TODO: re-enable once the content unit-test have been updated to pass on windows
+  #- choco install codecov
+
+build_script:
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe fmt"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe vet"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe binaries"
+
+test_script:
+  # TODO: need an equivalent of TRAVIS_COMMIT_RANGE
+  # - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
+  # TODO: re-enable once the content unit-test have been updated to pass on windows
+  #- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage"
+  #- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe root-coverage"
+
+on_success:
+  # Note that, a Codecov upload token is not required.
+  # TODO: re-enable once the content unit-test have been updated to pass on windows
+  #- codecov -f coverage.txt
vendor/github.com/containerd/containerd/.travis.yml (generated, vendored) | 8

@@ -32,13 +32,16 @@ env:
   - TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
   - TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0
 
+before_install:
+  - uname -r
+
 install:
   - if [ "$TRAVIS_GOOS" = "windows" ] ; then sudo apt-get install -y gcc-multilib gcc-mingw-w64; export CC=x86_64-w64-mingw32-gcc ; export CXX=x86_64-w64-mingw32-g++ ; fi
   - wget https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip -O /tmp/protoc-3.1.0-linux-x86_64.zip
   - unzip -o -d /tmp/protobuf /tmp/protoc-3.1.0-linux-x86_64.zip
   - export PATH=$PATH:/tmp/protobuf/bin/
   - go get -u github.com/vbatts/git-validation
-  - sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-1/runc -O /bin/runc; sudo chmod +x /bin/runc
+  - sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-4/runc -O /bin/runc; sudo chmod +x /bin/runc
   - wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
   - tar -C /tmp/ -zxf /tmp/criu.tar.gz
   - cd /tmp/criu-3.0 && sudo make install-criu

@@ -49,6 +52,9 @@ script:
   - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
   - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
   - make fmt
+  # FIXME: For non-linux GOOS, without running `go build -i`, vet fails with `vet: import failed: can't find import: fmt`...
+  # Note that `go build -i` requires write permission to GOROOT. (So it is not called in Makefile)
+  - go build -i .
   - make vet
   - make build
   - make binaries
vendor/github.com/containerd/containerd/BUILDING.md (generated, vendored) | 2

@@ -9,6 +9,8 @@ In first you need to setup your Go development environment. You can follow this
 guideline [How to write go code](https://golang.org/doc/code.html) and at the
 end you need to have `GOPATH` and `GOROOT` set in your environment.
 
+Current containerd requires Go 1.8.x or above.
+
 At this point you can use `go` to checkout `containerd` in your `GOPATH`:
 
 ```sh
vendor/github.com/containerd/containerd/Makefile (generated, vendored) | 57

@@ -8,27 +8,33 @@ DESTDIR=/usr/local
 VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
 REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
 
-PKG=github.com/containerd/containerd
-
 ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
 GOOS ?= $(shell go env GOOS)
 else
 GOOS ?= $$GOOS
 endif
-WHALE = "🐳"
+
+WHALE = "🇩"
 ONI = "👹"
+FIX_PATH = $1
 ifeq ("$(OS)", "Windows_NT")
 WHALE="+"
 ONI="-"
+FIX_PATH = $(subst /,\,$1)
 endif
+GOARCH ?= $(shell go env GOARCH)
+
+RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
+
+PKG=github.com/containerd/containerd
 
 # Project packages.
 PACKAGES=$(shell go list ./... | grep -v /vendor/)
-INTEGRATION_PACKAGE=${PKG}/integration
-SNAPSHOT_PACKAGES=$(shell go list ./snapshot/...)
+INTEGRATION_PACKAGE=${PKG}
+TEST_REQUIRES_ROOT_PACKAGES=$(shell for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile);do echo "${PKG}/$$(dirname $$f)"; done)
 
 # Project binaries.
-COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild
+COMMANDS=ctr containerd
 ifneq ("$(GOOS)", "windows")
 COMMANDS += containerd-shim
 endif

@@ -41,9 +47,9 @@ GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
 GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
 
 # Flags passed to `go test`
-TESTFLAGS ?=-parallel 8 -race
+TESTFLAGS ?=-parallel 8 -race -v
 
-.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor
+.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor release
 .DEFAULT: default
 
 all: binaries

@@ -61,14 +67,15 @@ setup: ## install dependencies
     @go get -u github.com/golang/lint/golint
     #@go get -u github.com/kisielk/errcheck
     @go get -u github.com/gordonklaus/ineffassign
+    @go get -u github.com/stevvooe/protobuild
 
 generate: protos
     @echo "$(WHALE) $@"
     @PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}
 
-protos: bin/protoc-gen-gogoctrd bin/ctrd-protobuild ## generate protobuf
+protos: bin/protoc-gen-gogoctrd ## generate protobuf
     @echo "$(WHALE) $@"
-    @PATH=${ROOTDIR}/bin:${PATH} ctrd-protobuild ${PACKAGES}
+    @PATH=${ROOTDIR}/bin:${PATH} protobuild ${PACKAGES}
 
 checkprotos: protos ## check if protobufs needs to be generated again
     @echo "$(WHALE) $@"

@@ -84,7 +91,7 @@ vet: binaries ## run go vet
 
 fmt: ## run go fmt
     @echo "$(WHALE) $@"
-    @test -z "$$(gofmt -s -l . | grep -v vendor/ | grep -v ".pb.go$$" | tee /dev/stderr)" || \
+    @test -z "$$(gofmt -s -l . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go$$" | tee /dev/stderr)" || \
         (echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
     @test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
         (echo "$(ONI) please indent proto files with tabs only" && false)

@@ -93,7 +100,7 @@ fmt: ## run go fmt
 
 lint: ## run go lint
     @echo "$(WHALE) $@"
-    @test -z "$$(golint ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+    @test -z "$$(golint ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 dco: ## dco check
     @which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)

@@ -105,15 +112,15 @@ endif
 
 ineffassign: ## run ineffassign
     @echo "$(WHALE) $@"
-    @test -z "$$(ineffassign . | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+    @test -z "$$(ineffassign . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 #errcheck: ## run go errcheck
 #    @echo "$(WHALE) $@"
-#    @test -z "$$(errcheck ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+#    @test -z "$$(errcheck ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 build: ## build the go packages
     @echo "$(WHALE) $@"
-    @go build -i -v ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
+    @go build -v ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
 
 test: ## run tests, except integration tests and tests that require root
     @echo "$(WHALE) $@"

@@ -121,22 +128,32 @@ test: ## run tests, except integration tests and tests that require root
 
 root-test: ## run tests, except integration tests
     @echo "$(WHALE) $@"
-    @go test ${TESTFLAGS} ${SNAPSHOT_PACKAGES} -test.root
+    @go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) -test.root
 
 integration: ## run integration tests
     @echo "$(WHALE) $@"
-    @go test ${TESTFLAGS}
+    @go test ${TESTFLAGS} -test.root
+
+benchmark: ## run benchmarks tests
+    @echo "$(WHALE) $@"
+    @go test ${TESTFLAGS} -bench . -run Benchmark -test.root
 
 FORCE:
 
 # Build a binary from a cmd.
 bin/%: cmd/% FORCE
     @echo "$(WHALE) $@${BINARY_SUFFIX}"
-    @go build -i -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
+    @go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
 
 binaries: $(BINARIES) ## build binaries
     @echo "$(WHALE) $@"
 
+release: $(BINARIES)
+    @echo "$(WHALE) $@"
+    @mkdir -p releases/${RELEASE}
+    @cp $(BINARIES) releases/$(RELEASE)/
+    @cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz *
+
 clean: ## clean up binaries
     @echo "$(WHALE) $@"
     @rm -f $(BINARIES)

@@ -169,14 +186,14 @@ coverage: ## generate coverprofiles from the unit tests, except tests that requi
 
 root-coverage: ## generae coverage profiles for the unit tests
     @echo "$(WHALE) $@"
-    @( for pkg in ${SNAPSHOT_PACKAGES}; do \
+    @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}); do \
         go test -i ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
         go test ${TESTFLAGS} -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg -test.root || exit; \
     done )
 
 coverage-integration: ## generate coverprofiles from the integration tests
     @echo "$(WHALE) $@"
-    go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE}
+    go test ${TESTFLAGS} -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE} -test.root
 
 vendor:
     @echo "$(WHALE) $@"
vendor/github.com/containerd/containerd/Protobuild.toml (generated, vendored, new file) | 29

@@ -0,0 +1,29 @@
+version = "unstable"
+generator = "gogoctrd"
+plugins = ["grpc"]
+
+# Control protoc include paths. Below are usually some good defaults, but feel
+# free to try it without them if it works for your project.
+[includes]
+  # Include paths that will be added before all others. Typically, you want to
+  # treat the root of the project as an include, but this may not be necessary.
+  before = ["."]
+
+  # Paths that should be treated as include roots in relation to the vendor
+  # directory. These will be calculated with the vendor directory nearest the
+  # target package.
+  vendored = ["github.com/gogo/protobuf"]
+
+  # Paths that will be added untouched to the end of the includes. We use
+  # `/usr/local/include` to pickup the common install location of protobuf.
+  # This is the default.
+  after = ["/usr/local/include"]
+
+# This section maps protobuf imports to Go packages. These will become
+# `-M` directives in the call to the go protobuf generator.
+[packages]
+  "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
+  "google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+  "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
221
vendor/github.com/containerd/containerd/README.md
generated
vendored
221
vendor/github.com/containerd/containerd/README.md
generated
vendored
@@ -1,26 +1,147 @@
 

+[](https://godoc.org/github.com/containerd/containerd)
[](https://travis-ci.org/containerd/containerd)
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
+[](https://goreportcard.com/report/github.com/containerd/containerd)

-containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc..
+containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.

containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.

-### State of the Project
+## Features

-containerd currently has two active branches.
+### Client
-There is a [v0.2.x](https://github.com/containerd/containerd/tree/v0.2.x) branch for the current release of containerd that is being consumed by Docker and others and the master branch is the development branch for the 1.0 roadmap and feature set.
-Any PR or issue that is intended for the current v0.2.x release should be tagged with the same `v0.2.x` tag.

-### Communication
+containerd offers a full client package to help you integrate containerd into your platform.

-For async communication and long running discussions please use issues and pull requests on the github repo.
+```go
-This will be the best place to discuss design and implementation.

-For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
+import "github.com/containerd/containerd"

-**Slack:** https://dockr.ly/community
+func main() {
+	client, err := containerd.New("/run/containerd/containerd.sock")
+	defer client.Close()
+}
+
+```
+
+### Namespaces
+
+Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images.
+
+To set a namespace for requests to the API:
+
+```go
+context = context.Background()
+// create a context for docker
+docker = namespaces.WithNamespace(context, "docker")
+
+containerd, err := client.NewContainer(docker, "id")
+```
+
+To set a default namespace on the client:
+
+```go
+client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
+```
+
+### Distribution
+
+```go
+// pull an image
+image, err := client.Pull(context, "docker.io/library/redis:latest")
+
+// push an image
+err := client.Push(context, "docker.io/library/redis:latest", image.Target())
+```
+
+### OCI Runtime Specification
+
+containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
+
+```go
+spec, err := containerd.GenerateSpec(containerd.WithImageConfig(context, image))
+```
+
+### Containers
+
+In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
+
+```go
+redis, err := client.NewContainer(context, "redis-master",
+	containerd.WithSpec(spec),
+)
+defer redis.Delete(context)
+```
+
+## Root Filesystems
+
+containerd allows you to use overlay or snapshot filesystems with your containers. It comes with builtin support for overlayfs and btrfs.
+
+```go
+// pull an image and unpack it into the configured snapshotter
+image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
+
+// allocate a new RW root filesystem for a container based on the image
+redis, err := client.NewContainer(context, "redis-master",
+	containerd.WithSpec(spec),
+	containerd.WithNewSnapshot("redis-rootfs", image),
+)
+
+// use a readonly filesystem with multiple containers
+for i := 0; i < 10; i++ {
+	id := fmt.Sprintf("id-%s", i)
+	container, err := client.NewContainer(ctx, id,
+		containerd.WithSpec(spec),
+		containerd.WithNewSnapshotView(id, image),
+	)
+}
+```
+
+### Tasks
+
+Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.
+
+```go
+// create a new task
+task, err := redis.NewTask(context, containerd.Stdio)
+defer task.Delete(context)
+
+// the task is now running and has a pid that can be use to setup networking
+// or other runtime settings outside of containerd
+pid := task.Pid()
+
+// start the redis-server process inside the container
+err := task.Start(context)
+
+// wait for the task to exit and get the exit status
+status, err := task.Wait(context)
+```
+
+### Checkpoint and Restore
+
+If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allow you to clone and/or live migrate containers to other machines.
+
+```go
+// checkpoint the task then push it to a registry
+checkpoint, err := task.Checkpoint(context, containerd.WithExit)
+
+err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
+
+// on a new machine pull the checkpoint and restore the redis container
+image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
+
+checkpoint := image.Target()
+
+redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
+defer container.Delete(context)
+
+task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+defer task.Delete(context)
+
+err := task.Start(context)
+```

### Developer Quick-Start

@@ -47,44 +168,6 @@ Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vn

Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.

-## Features
-
-* OCI Image Spec support
-* OCI Runtime Spec support
-* Image push and pull support
-* Container runtime and lifecycle support
-* Management of network namespaces containers to join existing namespaces
-* Multi-tenant supported with CAS storage for global images
-
-## Scope and Principles
-
-Having a clearly defined scope of a project is important for ensuring consistency and focus.
-These following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.
-
-### Components
-
-Components should not have tight dependencies on each other so that they are able to be used independently.
-The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.
-
-An example for this design can be seen with the overlay filesystems and the container execution layer.
-The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
-
-### Primitives
-
-containerd should expose primitives to solve problems instead of building high level abstractions in the API.
-A common example of this is how build would be implemented.
-Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
-Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allow people implementing build at the higher levels more flexibility.
-
-### Extensibility and Defaults
-
-For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
-The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification they can be easily added to containerd.
-
-containerd will come with a default implementation for the various components.
-These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
-Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
-
### Releases

containerd will be released with a 1.0 when feature complete and this version will be supported for 1 year with security and bug fixes applied and released.
@@ -96,45 +179,23 @@ There is no compatibility guarantees with upgrades from two minor releases. i.e.
There are not backwards compatibility guarantees with upgrades to major versions. i.e 1.0.0 to 2.0.0.
Each major version will be supported for 1 year with bug fixes and security patches.

-### Scope
-
-The following table specifies the various components of containerd and general features of container runtimes.
-The table specifies whether or not the feature/component is in or out of scope.
-
-| Name | Description | In/Out | Reason |
-|------------------------------|----------------------------------------------------------------|--------|----------------------------------------------------------------|
-| execution | Provide an extensible execution layer for executing a container | in | Create,start, stop pause, resume exec, signal, delete |
-| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
-| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
-| metrics | container-level metrics, cgroup stats, and OOM events | in |
-| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
-| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
-| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
-| logging | Persisting container logs | out | Logging can be build on top of containerd because the container’s STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |
-
-
-containerd is scoped to a single host and makes assumptions based on that fact.
-It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
-
-containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purpose, with no mandate to be human-friendly, and no guarantee of interface stability over time.
-
-Also things like service discovery are out of scope even though networking is in scope.
-containerd should provide the primitives to create, add, remove, or manage network interfaces and network namespaces for a container but IP allocation, discovery, and DNS should be handled at higher layers.
-
-### How is the scope changed?
-
-The scope of this project is a whitelist.
-If it's not mentioned as being in scope, it is out of scope.
-For the scope of this project to change it requires a 100% vote from all maintainers of the project.
-
### Development reports.

Weekly summary on the progress and what is being worked on.
https://github.com/containerd/containerd/tree/master/reports

+### Communication
+
+For async communication and long running discussions please use issues and pull requests on the github repo.
+This will be the best place to discuss design and implementation.
+
+For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
+
+**Slack:** https://dockr.ly/community
+
## Copyright and license

-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code
+Copyright ©2016-2017 Docker, Inc. All rights reserved, except as follows. Code
is released under the Apache 2.0 license. The README.md file, and files in the
"docs" folder are licensed under the Creative Commons Attribution 4.0
International License under the terms and conditions set forth in the file
79  vendor/github.com/containerd/containerd/ROADMAP.md  generated  vendored
@@ -1,78 +1,3 @@
-# containerd roadmap
+# containerd Roadmap

-This is a high level roadmap for the project that outlines what is currently being worked on, what comes next, and where you can help.
+Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
-
-For a more up to date look please review the milestones on [github](https://github.com/containerd/containerd/milestones).
-
-The following are the different status the various phases of development can be in:
-* Not Started - no work or thinking has been done towards the goal
-* In Design - design work has started for the component and you can find design documents in the `design` folder
-* In Progress - design has mostly finished and development has started
-* Completed - the development work has been completed
-* Stable - the apis for the phase are feature complete and considered stable
-
-We would like to follow the roadmap and develop the components one by one to completion before starting the next phase. If PRs are opened for another phase before the previous phase has been completed they will be closed as we are not ready for them at that time.
-
-## Phase 1
-
-**Status:** In Progress
-
-### GRPC API
-
-**Documents:**
-
-We are going from a top down design for filling out this missing pieces of containerd and design of the API.
-
-### Design
-
-**Documents:**
-
-The high level design work is needed so that the architecture of containerd stays consistent throughout the development process.
-
-### Build & Test Process
-
-**Documents:**
-
-We need to have a simple build and test process for new developers to bootstrap their environments.
-Because containerd will be the base of many high level systems we need to have a simple build process that does
-not require high level tooling.
-
-## Phase 2
-
-Phase 2 includes most of the design and development work for the execution and storage layers of containerd.
-It will include porting over existing "graph drivers" from Docker Engine and finding a common model for representing snapshots for layered filesystems.
-
-This will also include moving the existing execution code support OCI's Runtime Spec and the existing containerd execution code.
-
-**Status:** In Design
-
-### Runtime
-
-The runtime layer is responsible for the creation of containers and their management, and supervision of the processes inside those containers.
-
-### Storage
-
-**Documents:** https://github.com/containerd/containerd/blob/master/design/snapshots.md
-
-The current graph drivers were built when we only had overlay filesystems like aufs.
-We forced the model to be designed around overlay filesystems and this introduced a lot of complexity for snapshotting graph drivers like btrfs and devicemapper thin-p.
-Our current approach is to model our storage layer after snapshotting drivers instead of overlay drivers as we can get the same results and its cleaner and more robust to have an overlay filesytem model snapshots than it is to have a snapshot filesystem model overlay filesystems.
-
-## Phase 3
-
-This phase includes getting support for the OCI Image spec built into containerd.
-
-**Status:** Not Started
-
-### Distribution
-
-## Phase 4
-
-Phase 4 involves graduating to version 1.0, and shifting the focus from features to maintenance. Graduating to 1.0 implies:
-
-- Completing all of the above phases.
-- Covering the functionalities required by a majority of container-centric platforms.
-- Offering feature parity, to the extent of technical possibilities, across Linux and Windows.
-- Demonstrating that containerd fulfills the requirements of at least one higher-level platforms through its complete integration as an upstream.
-
-**Status:** Not Started
2  vendor/github.com/containerd/containerd/RUNC.md  generated  vendored
@@ -2,7 +2,7 @@ containerd is built with OCI support and with support for advanced features prov

We depend on a specific runc version when dealing with advanced features. You should have a specific build for development. The current supported runc commit is:

-RUNC_COMMIT = 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
+RUNC_COMMIT = e775f0fba3ea329b8b766451c892c41a3d49594d

## building

57  vendor/github.com/containerd/containerd/SCOPE.md  generated  vendored  Normal file
@@ -0,0 +1,57 @@
+# Scope and Principles
+
+Having a clearly defined scope of a project is important for ensuring consistency and focus.
+These following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.
+
+### Components
+
+Components should not have tight dependencies on each other so that they are able to be used independently.
+The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still be useful independently.
+
+An example for this design can be seen with the overlay filesystems and the container execution layer.
+The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
+
+### Primitives
+
+containerd should expose primitives to solve problems instead of building high level abstractions in the API.
+A common example of this is how build would be implemented.
+Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
+Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allow people implementing build at the higher levels with more flexibility.
+
+### Extensibility and Defaults
+
+For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
+The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.
+
+containerd will come with a default implementation for the various components.
+These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
+Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
+
+
+## Scope
+
+The following table specifies the various components of containerd and general features of container runtimes.
+The table specifies whether or not the feature/component is in or out of scope.
+
+| Name | Description | In/Out | Reason |
+|------------------------------|----------------------------------------------------------------|--------|----------------------------------------------------------------|
+| execution | Provide an extensible execution layer for executing a container | in | Create,start, stop pause, resume exec, signal, delete |
+| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
+| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
+| metrics | container-level metrics, cgroup stats, and OOM events | in |
+| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
+| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
+| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
+| logging | Persisting container logs | out | Logging can be build on top of containerd because the container’s STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |
+
+
+containerd is scoped to a single host and makes assumptions based on that fact.
+It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
+
+containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purpose, with no mandate to be human-friendly, and no guarantee of interface stability over time.
+
+### How is the scope changed?
+
+The scope of this project is a whitelist.
+If it's not mentioned as being in scope, it is out of scope.
+For the scope of this project to change it requires a 100% vote from all maintainers of the project.
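The shared `Mount` struct referenced in SCOPE.md above is the contract between snapshotters and the execution layer. A minimal sketch, assuming field names from the containerd mount package of this era; this is illustrative and not part of the vendored diff:

```go
package main

import "fmt"

// Mount is a sketch of the shared mount description that snapshotters
// produce and the execution layer consumes. Treat the field set as an
// assumption for illustration, not as vendored code.
type Mount struct {
    Type    string   // e.g. "overlay" or "bind"
    Source  string   // device, directory, or "overlay"
    Options []string // raw options handed to mount(2)
}

func main() {
    m := Mount{
        Type:    "overlay",
        Source:  "overlay",
        Options: []string{"lowerdir=/var/lib/containerd/l1", "upperdir=/tmp/upper", "workdir=/tmp/work"},
    }
    // The execution layer only needs this triple to perform the mount.
    fmt.Printf("mount -t %s %s -o %v\n", m.Type, m.Source, m.Options)
}
```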
@@ -1,12 +1,12 @@
// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/containers/containers.proto
+// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
// DO NOT EDIT!

/*
    Package containers is a generated protocol buffer package.

    It is generated from these files:
-        github.com/containerd/containerd/api/services/containers/containers.proto
+        github.com/containerd/containerd/api/services/containers/v1/containers.proto

    It has these top-level messages:
        Container
@@ -30,7 +30,7 @@ import google_protobuf1 "github.com/gogo/protobuf/types"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
import google_protobuf3 "github.com/gogo/protobuf/types"
import _ "github.com/gogo/protobuf/types"
-import _ "github.com/containerd/containerd/api/types/descriptor"
+import _ "github.com/containerd/containerd/api/types"

import time "time"

@@ -75,9 +75,11 @@ type Container struct {
    // If this field is updated, the spec and rootfs needed to updated, as well.
    Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"`
    // Runtime specifies which runtime to use for executing this container.
-    Runtime string `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"`
+    Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime" json:"runtime,omitempty"`
    // Spec to be used when creating the container. This is runtime specific.
-    Spec *google_protobuf1.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
+    Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"`
+    // Snapshotter specifies the snapshotter name used for rootfs
+    Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
    // RootFS specifies the snapshot key to use for the container's root
    // filesystem. When starting a task from this container, a caller should
    // look up the mounts from the snapshot service and include those on the
@@ -87,7 +89,9 @@ type Container struct {
    //
    // This field may be updated.
    RootFS string `protobuf:"bytes,7,opt,name=rootfs,proto3" json:"rootfs,omitempty"`
+    // CreatedAt is the time the container was first created.
    CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+    // UpdatedAt is the last time the container was mutated.
    UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
}

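For orientation, a minimal sketch of how a caller might populate the reshaped `Container` message above, assuming the generated package is imported as `containers`; the runtime name and snapshotter values are illustrative only:

```go
package main

import (
    "fmt"
    "time"

    containers "github.com/containerd/containerd/api/services/containers/v1"
)

func main() {
    // Runtime is now a nested message rather than a plain string, and
    // Snapshotter is a new string field (see fields 4, 5 and 6 above).
    c := containers.Container{
        ID:          "redis-master",
        Image:       "docker.io/library/redis:latest",
        Runtime:     &containers.Container_Runtime{Name: "linux"}, // assumed runtime name
        Snapshotter: "overlayfs",                                  // assumed snapshotter
        RootFS:      "redis-rootfs",
        CreatedAt:   time.Now(),
    }
    fmt.Println(c.ID, c.Runtime.Name, c.Snapshotter)
}
```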
@@ -95,6 +99,17 @@ func (m *Container) Reset() { *m = Container{} }
func (*Container) ProtoMessage() {}
func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} }

+type Container_Runtime struct {
+    // Name is the name of the runtime.
+    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+    // Options specify additional runtime initialization options.
+    Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *Container_Runtime) Reset() { *m = Container_Runtime{} }
+func (*Container_Runtime) ProtoMessage() {}
+func (*Container_Runtime) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0, 1} }
+
type GetContainerRequest struct {
    ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
@@ -112,7 +127,17 @@ func (*GetContainerResponse) ProtoMessage() {}
func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} }

type ListContainersRequest struct {
-    Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+    // Filters contains one or more filters using the syntax defined in the
+    // containerd filter package.
+    //
+    // The returned result will be those that match any of the provided
+    // filters. Expanded, containers that match the following will be
+    // returned:
+    //
+    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+    //
+    // If filters is zero-length or nil, all items will be returned.
+    Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
}

func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} }
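A minimal sketch of the new `Filters` field in use, assuming the generated package is imported as `containers`; the filter expressions below are hypothetical, and the real syntax is defined by the containerd filter package. Matching is OR across entries, and an empty slice returns everything:

```go
package main

import (
    "fmt"

    containers "github.com/containerd/containerd/api/services/containers/v1"
)

func main() {
    // A container matching any one of these expressions is returned.
    req := &containers.ListContainersRequest{
        Filters: []string{
            `labels."app"==redis`, // hypothetical label filter
            `id~=^build-`,         // hypothetical id regex filter
        },
    }
    fmt.Println(len(req.Filters), "filters attached to the List request")
}
```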
@@ -183,16 +208,17 @@ func (*DeleteContainerRequest) ProtoMessage() {}
func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} }

func init() {
-    proto.RegisterType((*Container)(nil), "containerd.v1.Container")
-    proto.RegisterType((*GetContainerRequest)(nil), "containerd.v1.GetContainerRequest")
-    proto.RegisterType((*GetContainerResponse)(nil), "containerd.v1.GetContainerResponse")
-    proto.RegisterType((*ListContainersRequest)(nil), "containerd.v1.ListContainersRequest")
-    proto.RegisterType((*ListContainersResponse)(nil), "containerd.v1.ListContainersResponse")
-    proto.RegisterType((*CreateContainerRequest)(nil), "containerd.v1.CreateContainerRequest")
-    proto.RegisterType((*CreateContainerResponse)(nil), "containerd.v1.CreateContainerResponse")
-    proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.v1.UpdateContainerRequest")
-    proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.v1.UpdateContainerResponse")
-    proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.v1.DeleteContainerRequest")
+    proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container")
+    proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime")
+    proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest")
+    proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse")
+    proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest")
+    proto.RegisterType((*ListContainersResponse)(nil), "containerd.services.containers.v1.ListContainersResponse")
+    proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest")
+    proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse")
+    proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest")
+    proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse")
+    proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest")
}

// Reference imports to suppress errors if they are not otherwise used.
@@ -223,7 +249,7 @@ func NewContainersClient(cc *grpc.ClientConn) ContainersClient {

func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) {
    out := new(GetContainerResponse)
-    err := grpc.Invoke(ctx, "/containerd.v1.Containers/Get", in, out, c.cc, opts...)
+    err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, c.cc, opts...)
    if err != nil {
        return nil, err
    }
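A minimal sketch of using the regenerated stub directly over gRPC, assuming the daemon listens on the default socket path; the higher-level containerd client wraps this and also attaches namespace metadata, which is omitted here:

```go
package main

import (
    "context"
    "log"
    "net"
    "time"

    containers "github.com/containerd/containerd/api/services/containers/v1"
    "google.golang.org/grpc"
)

func main() {
    // Dial the containerd unix socket; in real use the namespace must also be
    // attached to the context before calling the service.
    conn, err := grpc.Dial("/run/containerd/containerd.sock",
        grpc.WithInsecure(),
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout)
        }),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    svc := containers.NewContainersClient(conn)
    // The stub fills in the fully qualified method name shown in the diff,
    // "/containerd.services.containers.v1.Containers/Get".
    resp, err := svc.Get(context.Background(), &containers.GetContainerRequest{ID: "redis-master"})
    if err != nil {
        log.Fatal(err)
    }
    log.Println(resp.Container.ID)
}
```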
@@ -232,7 +258,7 @@ func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opt

func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) {
    out := new(ListContainersResponse)
-    err := grpc.Invoke(ctx, "/containerd.v1.Containers/List", in, out, c.cc, opts...)
+    err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, c.cc, opts...)
    if err != nil {
        return nil, err
    }
@@ -241,7 +267,7 @@ func (c *containersClient) List(ctx context.Context, in *ListContainersRequest,

func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
    out := new(CreateContainerResponse)
-    err := grpc.Invoke(ctx, "/containerd.v1.Containers/Create", in, out, c.cc, opts...)
+    err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, c.cc, opts...)
    if err != nil {
        return nil, err
    }
@@ -250,7 +276,7 @@ func (c *containersClient) Create(ctx context.Context, in *CreateContainerReques

func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
    out := new(UpdateContainerResponse)
-    err := grpc.Invoke(ctx, "/containerd.v1.Containers/Update", in, out, c.cc, opts...)
+    err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, c.cc, opts...)
    if err != nil {
        return nil, err
    }
@@ -259,7 +285,7 @@ func (c *containersClient) Update(ctx context.Context, in *UpdateContainerReques

func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
    out := new(google_protobuf2.Empty)
-    err := grpc.Invoke(ctx, "/containerd.v1.Containers/Delete", in, out, c.cc, opts...)
+    err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, c.cc, opts...)
    if err != nil {
        return nil, err
    }
@@ -290,7 +316,7 @@ func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(inte
    }
    info := &grpc.UnaryServerInfo{
        Server: srv,
-        FullMethod: "/containerd.v1.Containers/Get",
+        FullMethod: "/containerd.services.containers.v1.Containers/Get",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest))
@@ -308,7 +334,7 @@ func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(int
    }
    info := &grpc.UnaryServerInfo{
        Server: srv,
-        FullMethod: "/containerd.v1.Containers/List",
+        FullMethod: "/containerd.services.containers.v1.Containers/List",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest))
@@ -326,7 +352,7 @@ func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(i
    }
    info := &grpc.UnaryServerInfo{
        Server: srv,
-        FullMethod: "/containerd.v1.Containers/Create",
+        FullMethod: "/containerd.services.containers.v1.Containers/Create",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest))
@@ -344,7 +370,7 @@ func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(i
    }
    info := &grpc.UnaryServerInfo{
        Server: srv,
-        FullMethod: "/containerd.v1.Containers/Update",
+        FullMethod: "/containerd.services.containers.v1.Containers/Update",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest))
@@ -362,7 +388,7 @@ func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
    }
    info := &grpc.UnaryServerInfo{
        Server: srv,
-        FullMethod: "/containerd.v1.Containers/Delete",
+        FullMethod: "/containerd.services.containers.v1.Containers/Delete",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest))
@@ -371,7 +397,7 @@ func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
}

var _Containers_serviceDesc = grpc.ServiceDesc{
-    ServiceName: "containerd.v1.Containers",
+    ServiceName: "containerd.services.containers.v1.Containers",
    HandlerType: (*ContainersServer)(nil),
    Methods: []grpc.MethodDesc{
        {
@@ -396,7 +422,7 @@ var _Containers_serviceDesc = grpc.ServiceDesc{
        },
    },
    Streams: []grpc.StreamDesc{},
-    Metadata: "github.com/containerd/containerd/api/services/containers/containers.proto",
+    Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto",
}

func (m *Container) Marshal() (dAtA []byte, err error) {
@@ -443,22 +469,32 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
        i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
        i += copy(dAtA[i:], m.Image)
    }
-    if len(m.Runtime) > 0 {
+    if m.Runtime != nil {
        dAtA[i] = 0x22
        i++
-        i = encodeVarintContainers(dAtA, i, uint64(len(m.Runtime)))
-        i += copy(dAtA[i:], m.Runtime)
-    }
-    if m.Spec != nil {
-        dAtA[i] = 0x32
-        i++
-        i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
-        n1, err := m.Spec.MarshalTo(dAtA[i:])
+        i = encodeVarintContainers(dAtA, i, uint64(m.Runtime.Size()))
+        n1, err := m.Runtime.MarshalTo(dAtA[i:])
        if err != nil {
            return 0, err
        }
        i += n1
    }
+    if m.Spec != nil {
+        dAtA[i] = 0x2a
+        i++
+        i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
+        n2, err := m.Spec.MarshalTo(dAtA[i:])
+        if err != nil {
+            return 0, err
+        }
+        i += n2
+    }
+    if len(m.Snapshotter) > 0 {
+        dAtA[i] = 0x32
+        i++
+        i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter)))
+        i += copy(dAtA[i:], m.Snapshotter)
+    }
    if len(m.RootFS) > 0 {
        dAtA[i] = 0x3a
        i++
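The byte constants touched in this hunk are protobuf wire-format keys: each is `(field_number << 3) | wire_type`, with wire type 2 for length-delimited fields. A small sketch, not vendored code, showing why `Runtime` keeps `0x22`, `Spec` moves to `0x2a`, and the new `Snapshotter` field takes `0x32`:

```go
package main

import "fmt"

func main() {
    // Key byte for a length-delimited (wire type 2) protobuf field.
    tag := func(field uint32) uint32 { return field<<3 | 2 }

    fmt.Printf("runtime     (field 4): %#x\n", tag(4)) // 0x22
    fmt.Printf("spec        (field 5): %#x\n", tag(5)) // 0x2a
    fmt.Printf("snapshotter (field 6): %#x\n", tag(6)) // 0x32
    fmt.Printf("rootfs      (field 7): %#x\n", tag(7)) // 0x3a
    fmt.Printf("created_at  (field 8): %#x\n", tag(8)) // 0x42
    fmt.Printf("updated_at  (field 9): %#x\n", tag(9)) // 0x4a
}
```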
@@ -468,19 +504,53 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0x42
    i++
    i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
-    n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
-    if err != nil {
-        return 0, err
-    }
-    i += n2
-    dAtA[i] = 0x4a
-    i++
-    i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
-    n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+    n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
    if err != nil {
        return 0, err
    }
    i += n3
+    dAtA[i] = 0x4a
+    i++
+    i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+    n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+    if err != nil {
+        return 0, err
+    }
+    i += n4
+    return i, nil
+}
+
+func (m *Container_Runtime) Marshal() (dAtA []byte, err error) {
+    size := m.Size()
+    dAtA = make([]byte, size)
+    n, err := m.MarshalTo(dAtA)
+    if err != nil {
+        return nil, err
+    }
+    return dAtA[:n], nil
+}
+
+func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) {
+    var i int
+    _ = i
+    var l int
+    _ = l
+    if len(m.Name) > 0 {
+        dAtA[i] = 0xa
+        i++
+        i = encodeVarintContainers(dAtA, i, uint64(len(m.Name)))
+        i += copy(dAtA[i:], m.Name)
+    }
+    if m.Options != nil {
+        dAtA[i] = 0x12
+        i++
+        i = encodeVarintContainers(dAtA, i, uint64(m.Options.Size()))
+        n5, err := m.Options.MarshalTo(dAtA[i:])
+        if err != nil {
+            return 0, err
+        }
+        i += n5
+    }
    return i, nil
}

@@ -526,11 +596,11 @@ func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0xa
    i++
    i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
-    n4, err := m.Container.MarshalTo(dAtA[i:])
+    n6, err := m.Container.MarshalTo(dAtA[i:])
    if err != nil {
        return 0, err
    }
-    i += n4
+    i += n6
    return i, nil
}

@@ -549,11 +619,20 @@ func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) {
    _ = i
    var l int
    _ = l
-    if len(m.Filter) > 0 {
-        dAtA[i] = 0xa
-        i++
-        i = encodeVarintContainers(dAtA, i, uint64(len(m.Filter)))
-        i += copy(dAtA[i:], m.Filter)
-    }
+    if len(m.Filters) > 0 {
+        for _, s := range m.Filters {
+            dAtA[i] = 0xa
+            i++
+            l = len(s)
+            for l >= 1<<7 {
+                dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+                l >>= 7
+                i++
+            }
+            dAtA[i] = uint8(l)
+            i++
+            i += copy(dAtA[i:], s)
+        }
+    }
    return i, nil
}
@@ -606,11 +685,11 @@ func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0xa
    i++
    i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
-    n5, err := m.Container.MarshalTo(dAtA[i:])
+    n7, err := m.Container.MarshalTo(dAtA[i:])
    if err != nil {
        return 0, err
    }
-    i += n5
+    i += n7
    return i, nil
}

@@ -632,11 +711,11 @@ func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0xa
    i++
    i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
-    n6, err := m.Container.MarshalTo(dAtA[i:])
+    n8, err := m.Container.MarshalTo(dAtA[i:])
    if err != nil {
        return 0, err
    }
-    i += n6
+    i += n8
    return i, nil
}

@@ -658,20 +737,20 @@ func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0xa
    i++
    i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
-    n7, err := m.Container.MarshalTo(dAtA[i:])
+    n9, err := m.Container.MarshalTo(dAtA[i:])
    if err != nil {
        return 0, err
    }
-    i += n7
+    i += n9
    if m.UpdateMask != nil {
        dAtA[i] = 0x12
        i++
        i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size()))
-        n8, err := m.UpdateMask.MarshalTo(dAtA[i:])
+        n10, err := m.UpdateMask.MarshalTo(dAtA[i:])
        if err != nil {
            return 0, err
        }
-        i += n8
+        i += n10
    }
    return i, nil
}
@@ -694,11 +773,11 @@ func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
    dAtA[i] = 0xa
    i++
    i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
-    n9, err := m.Container.MarshalTo(dAtA[i:])
+    n11, err := m.Container.MarshalTo(dAtA[i:])
    if err != nil {
        return 0, err
    }
-    i += n9
+    i += n11
    return i, nil
}

@@ -772,14 +851,18 @@ func (m *Container) Size() (n int) {
    if l > 0 {
        n += 1 + l + sovContainers(uint64(l))
    }
-    l = len(m.Runtime)
-    if l > 0 {
+    if m.Runtime != nil {
+        l = m.Runtime.Size()
        n += 1 + l + sovContainers(uint64(l))
    }
    if m.Spec != nil {
        l = m.Spec.Size()
        n += 1 + l + sovContainers(uint64(l))
    }
+    l = len(m.Snapshotter)
+    if l > 0 {
+        n += 1 + l + sovContainers(uint64(l))
+    }
    l = len(m.RootFS)
    if l > 0 {
        n += 1 + l + sovContainers(uint64(l))
@@ -791,6 +874,20 @@ func (m *Container) Size() (n int) {
    return n
}

+func (m *Container_Runtime) Size() (n int) {
+    var l int
+    _ = l
+    l = len(m.Name)
+    if l > 0 {
+        n += 1 + l + sovContainers(uint64(l))
+    }
+    if m.Options != nil {
+        l = m.Options.Size()
+        n += 1 + l + sovContainers(uint64(l))
+    }
+    return n
+}
+
func (m *GetContainerRequest) Size() (n int) {
    var l int
    _ = l
@@ -812,10 +909,12 @@ func (m *GetContainerResponse) Size() (n int) {
func (m *ListContainersRequest) Size() (n int) {
    var l int
    _ = l
-    l = len(m.Filter)
-    if l > 0 {
-        n += 1 + l + sovContainers(uint64(l))
-    }
+    if len(m.Filters) > 0 {
+        for _, s := range m.Filters {
+            l = len(s)
+            n += 1 + l + sovContainers(uint64(l))
+        }
+    }
    return n
}
@@ -908,8 +1007,9 @@ func (this *Container) String() string {
        `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
        `Labels:` + mapStringForLabels + `,`,
        `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
-        `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
+        `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`,
        `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
+        `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
        `RootFS:` + fmt.Sprintf("%v", this.RootFS) + `,`,
        `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
        `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
@@ -917,6 +1017,17 @@ func (this *Container) String() string {
    }, "")
    return s
}
+func (this *Container_Runtime) String() string {
+    if this == nil {
+        return "nil"
+    }
+    s := strings.Join([]string{`&Container_Runtime{`,
+        `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+        `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+        `}`,
+    }, "")
+    return s
+}
func (this *GetContainerRequest) String() string {
    if this == nil {
        return "nil"
@@ -942,7 +1053,7 @@ func (this *ListContainersRequest) String() string {
        return "nil"
    }
    s := strings.Join([]string{`&ListContainersRequest{`,
-        `Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
+        `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
        `}`,
    }, "")
    return s
@@ -1223,7 +1334,7 @@ func (m *Container) Unmarshal(dAtA []byte) error {
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
            }
-            var stringLen uint64
+            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowContainers
@ -1233,22 +1344,26 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
b := dAtA[iNdEx]
|
b := dAtA[iNdEx]
|
||||||
iNdEx++
|
iNdEx++
|
||||||
stringLen |= (uint64(b) & 0x7F) << shift
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
if b < 0x80 {
|
if b < 0x80 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
intStringLen := int(stringLen)
|
if msglen < 0 {
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthContainers
|
return ErrInvalidLengthContainers
|
||||||
}
|
}
|
||||||
postIndex := iNdEx + intStringLen
|
postIndex := iNdEx + msglen
|
||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Runtime = string(dAtA[iNdEx:postIndex])
|
if m.Runtime == nil {
|
||||||
|
m.Runtime = &Container_Runtime{}
|
||||||
|
}
|
||||||
|
if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 6:
|
case 5:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
||||||
}
|
}
|
||||||
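The replacement of stringLen with msglen above changes only what the decoded length is used for (an embedded Container_Runtime message instead of a raw string); the byte-level loop is the same base-128 varint read in both versions. A standalone sketch of that loop, assuming nothing beyond the standard library:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the inlined "for shift := ..." loop in the generated
// Unmarshal code: each byte contributes its low 7 bits, and the high bit says
// whether another byte follows.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, _ := decodeVarint([]byte{0xac, 0x02})
	fmt.Println(v, n) // 300 2
}
```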
@@ -1281,6 +1396,35 @@ func (m *Container) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainers
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		case 7:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field RootFS", wireType)
@ -1391,6 +1535,118 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (m *Container_Runtime) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Runtime: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContainers
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Options == nil {
|
||||||
|
m.Options = &google_protobuf1.Any{}
|
||||||
|
}
|
||||||
|
if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipContainers(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthContainers
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func (m *GetContainerRequest) Unmarshal(dAtA []byte) error {
|
func (m *GetContainerRequest) Unmarshal(dAtA []byte) error {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
@@ -1581,7 +1837,7 @@ func (m *ListContainersRequest) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -1606,7 +1862,7 @@ func (m *ListContainersRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Filter = string(dAtA[iNdEx:postIndex])
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
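Because filters keeps field number 1 and the length-delimited wire type, every value on the wire starts with the same key byte (1<<3 | 2 = 0x0a), and the loop above simply appends each occurrence to m.Filters. A hand-rolled sketch of that encoding, for illustration only:

```go
package main

import "fmt"

// encodeFilters writes a repeated string field with field number 1 and wire
// type 2 (length-delimited), the layout the unmarshal loop above appends from.
// Lengths here are assumed to fit in a single varint byte (< 128).
func encodeFilters(filters []string) []byte {
	var out []byte
	for _, f := range filters {
		out = append(out, 0x0a)         // key byte: 1<<3 | 2
		out = append(out, byte(len(f))) // length
		out = append(out, f...)         // payload
	}
	return out
}

func main() {
	fmt.Printf("% x\n", encodeFilters([]string{"id==x", "labels.app==web"}))
}
```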
@@ -2248,52 +2504,57 @@ var (
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/containers/containers.proto", fileDescriptorContainers)
+	proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptorContainers)
 }
 
 var fileDescriptorContainers = []byte{
-	// 680 bytes of a gzipped FileDescriptorProto
+	// 757 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x72, 0xd3, 0x4a,
|
||||||
0x10, 0xae, 0x93, 0xe0, 0x36, 0x13, 0x21, 0xa1, 0x25, 0x04, 0x63, 0xa4, 0x24, 0x32, 0x3f, 0xca,
|
0x10, 0x8d, 0x6c, 0x47, 0x8e, 0xdb, 0x9b, 0x5b, 0x73, 0x7d, 0x7d, 0x85, 0xa8, 0xb2, 0x8d, 0x57,
|
||||||
0x05, 0x1b, 0xc2, 0x85, 0x9f, 0x0a, 0xa9, 0xe9, 0x9f, 0x2a, 0x15, 0x0e, 0x6e, 0xa1, 0xdc, 0x2a,
|
0x5e, 0x80, 0x4c, 0x0c, 0x05, 0x79, 0xac, 0xe2, 0xbc, 0x8a, 0xaa, 0x84, 0x4a, 0x0d, 0xb0, 0x81,
|
||||||
0x27, 0xde, 0x04, 0xab, 0x8e, 0xd7, 0x78, 0x37, 0x95, 0x72, 0xe3, 0x11, 0x90, 0x78, 0x05, 0x8e,
|
0x45, 0x90, 0xed, 0xb1, 0x23, 0x2c, 0x69, 0x84, 0x66, 0xec, 0x2a, 0x17, 0x0b, 0xf8, 0x04, 0xfe,
|
||||||
0x3c, 0x48, 0x8f, 0x1c, 0x39, 0x15, 0x9a, 0x27, 0x41, 0xbb, 0x5e, 0xd7, 0xa9, 0xed, 0x50, 0x10,
|
0x82, 0x5f, 0xc9, 0x92, 0x25, 0xab, 0x3c, 0xfc, 0x25, 0x94, 0x46, 0xa3, 0xc8, 0xf8, 0x51, 0xc8,
|
||||||
0xbd, 0xcd, 0x78, 0xbe, 0xf9, 0x32, 0xf3, 0xcd, 0x67, 0x07, 0x76, 0x46, 0x1e, 0xfb, 0x30, 0xe9,
|
0x81, 0xec, 0xa6, 0x3d, 0x7d, 0xba, 0x8f, 0x4e, 0x9f, 0x96, 0x05, 0x47, 0x3d, 0x8b, 0x9f, 0x0d,
|
||||||
0x9b, 0x03, 0x32, 0xb6, 0x06, 0x24, 0x60, 0x8e, 0x17, 0xe0, 0xc8, 0x9d, 0x0f, 0x9d, 0xd0, 0xb3,
|
0x5a, 0x46, 0x9b, 0x3a, 0xf5, 0x36, 0x75, 0xb9, 0x69, 0xb9, 0xc4, 0xef, 0x4c, 0x1e, 0x4d, 0xcf,
|
||||||
0x28, 0x8e, 0x8e, 0xbd, 0x01, 0xa6, 0xe9, 0xf3, 0xf9, 0xd0, 0x0c, 0x23, 0xc2, 0x08, 0xba, 0x9e,
|
0xaa, 0x33, 0xe2, 0x0f, 0xad, 0x36, 0x61, 0xf1, 0xef, 0xac, 0x3e, 0x5c, 0x9f, 0x88, 0x0c, 0xcf,
|
||||||
0x36, 0x99, 0xc7, 0x4f, 0xf4, 0xfa, 0x88, 0x8c, 0x88, 0xa8, 0x58, 0x3c, 0x8a, 0x41, 0xfa, 0x9d,
|
0xa7, 0x9c, 0xa2, 0x07, 0x31, 0xce, 0x88, 0x30, 0xc6, 0x44, 0xd6, 0x70, 0x5d, 0x2f, 0xf4, 0x68,
|
||||||
0x11, 0x21, 0x23, 0x1f, 0x5b, 0x22, 0xeb, 0x4f, 0x86, 0x96, 0x13, 0x4c, 0x65, 0xe9, 0x6e, 0xb6,
|
0x8f, 0x8a, 0xec, 0x7a, 0x70, 0x0a, 0x81, 0xfa, 0xbd, 0x1e, 0xa5, 0x3d, 0x9b, 0xd4, 0x45, 0xd4,
|
||||||
0x84, 0xc7, 0x21, 0x4b, 0x8a, 0xed, 0x6c, 0x71, 0xe8, 0x61, 0xdf, 0x3d, 0x1c, 0x3b, 0xf4, 0x48,
|
0x1a, 0x74, 0xeb, 0xa6, 0x3b, 0x92, 0x57, 0xf7, 0xa7, 0xaf, 0x88, 0xe3, 0xf1, 0xe8, 0xb2, 0x32,
|
||||||
0x22, 0x5a, 0x59, 0x04, 0xf3, 0xc6, 0x98, 0x32, 0x67, 0x1c, 0x4a, 0xc0, 0xd6, 0x5f, 0xad, 0xca,
|
0x7d, 0xd9, 0xb5, 0x88, 0xdd, 0x39, 0x75, 0x4c, 0xd6, 0x97, 0x19, 0xe5, 0xe9, 0x0c, 0x6e, 0x39,
|
||||||
0xa6, 0x21, 0xa6, 0x96, 0x8b, 0xe9, 0x20, 0xf2, 0x42, 0x46, 0xa2, 0xb9, 0x30, 0xe6, 0x31, 0xbe,
|
0x84, 0x71, 0xd3, 0xf1, 0x64, 0xc2, 0x76, 0x22, 0x05, 0xf8, 0xc8, 0x23, 0xac, 0xde, 0x21, 0xac,
|
||||||
0x96, 0xa1, 0xba, 0x9e, 0x34, 0xa1, 0x06, 0x94, 0x3c, 0x57, 0x53, 0xda, 0x4a, 0xa7, 0xda, 0x53,
|
0xed, 0x5b, 0x1e, 0xa7, 0x7e, 0x08, 0xae, 0x5e, 0x66, 0x20, 0xb7, 0x1b, 0x65, 0xa2, 0x22, 0xa4,
|
||||||
0x67, 0xa7, 0xad, 0xd2, 0xce, 0x86, 0x5d, 0xf2, 0x5c, 0xb4, 0x0a, 0xaa, 0xef, 0xf4, 0xb1, 0x4f,
|
0xac, 0x8e, 0xa6, 0x54, 0x94, 0x5a, 0xae, 0xa9, 0x8e, 0x2f, 0xca, 0xa9, 0x17, 0x7b, 0x38, 0x65,
|
||||||
0xb5, 0x52, 0xbb, 0xdc, 0xa9, 0x75, 0xef, 0x9b, 0x17, 0xe4, 0x31, 0xcf, 0x19, 0xcc, 0x5d, 0x01,
|
0x75, 0xd0, 0x09, 0xa8, 0xb6, 0xd9, 0x22, 0x36, 0xd3, 0x52, 0x95, 0x74, 0x2d, 0xdf, 0xd8, 0x30,
|
||||||
0xdb, 0x0c, 0x58, 0x34, 0xb5, 0x65, 0x0f, 0xaa, 0xc3, 0x35, 0x6f, 0xec, 0x8c, 0xb0, 0x56, 0xe6,
|
0x7e, 0xab, 0x93, 0x71, 0x53, 0xd5, 0x38, 0x12, 0xd0, 0x7d, 0x97, 0xfb, 0x23, 0x2c, 0xeb, 0xa0,
|
||||||
0xc4, 0x76, 0x9c, 0x20, 0x0d, 0x96, 0xa3, 0x49, 0xc0, 0xf7, 0xd2, 0x2a, 0xe2, 0x79, 0x92, 0xa2,
|
0x02, 0xac, 0x5a, 0x8e, 0xd9, 0x23, 0x5a, 0x3a, 0x68, 0x86, 0xc3, 0x00, 0xbd, 0x84, 0xac, 0x3f,
|
||||||
0x0e, 0x54, 0x68, 0x88, 0x07, 0x9a, 0xda, 0x56, 0x3a, 0xb5, 0x6e, 0xdd, 0x8c, 0xb5, 0x30, 0x13,
|
0x70, 0x83, 0x07, 0xd4, 0x32, 0x15, 0xa5, 0x96, 0x6f, 0x3c, 0x5d, 0xaa, 0x11, 0x0e, 0xb1, 0x38,
|
||||||
0x2d, 0xcc, 0xb5, 0x60, 0x6a, 0x0b, 0x04, 0x32, 0x40, 0x8d, 0x08, 0x61, 0x43, 0xaa, 0x2d, 0x8b,
|
0x2a, 0x82, 0x6a, 0x90, 0x61, 0x1e, 0x69, 0x6b, 0xab, 0xa2, 0x58, 0xc1, 0x08, 0xa5, 0x34, 0x22,
|
||||||
0x99, 0x61, 0x76, 0xda, 0x52, 0x6d, 0x42, 0xd8, 0xd6, 0x9e, 0x2d, 0x2b, 0x68, 0x1d, 0x60, 0x10,
|
0x29, 0x8d, 0x1d, 0x77, 0x84, 0x45, 0x06, 0xaa, 0x40, 0x9e, 0xb9, 0xa6, 0xc7, 0xce, 0x28, 0xe7,
|
||||||
0x61, 0x87, 0x61, 0xf7, 0xd0, 0x61, 0xda, 0x8a, 0xe0, 0xd4, 0x73, 0x9c, 0xfb, 0x89, 0xbe, 0xbd,
|
0xc4, 0xd7, 0x54, 0xc1, 0x6a, 0xf2, 0x27, 0x54, 0x05, 0xd5, 0xa7, 0x94, 0x77, 0x99, 0x96, 0x15,
|
||||||
0x95, 0x93, 0xd3, 0xd6, 0xd2, 0xe7, 0x9f, 0x2d, 0xc5, 0xae, 0xca, 0xbe, 0x35, 0xc6, 0x49, 0x26,
|
0xfa, 0xc0, 0xf8, 0xa2, 0xac, 0x62, 0x4a, 0xf9, 0xc1, 0x2b, 0x2c, 0x6f, 0xd0, 0x2e, 0x40, 0xdb,
|
||||||
0xa1, 0x9b, 0x90, 0x54, 0xff, 0x85, 0x44, 0xf6, 0xad, 0x31, 0xfd, 0x39, 0xd4, 0xe6, 0xe4, 0x41,
|
0x27, 0x26, 0x27, 0x9d, 0x53, 0x93, 0x6b, 0x6b, 0xa2, 0xab, 0x3e, 0xd3, 0xf5, 0x75, 0x34, 0xc0,
|
||||||
0x37, 0xa0, 0x7c, 0x84, 0xa7, 0xb1, 0xda, 0x36, 0x0f, 0xb9, 0x50, 0xc7, 0x8e, 0x3f, 0xc1, 0x5a,
|
0xe6, 0xda, 0xf9, 0x45, 0x79, 0xe5, 0xeb, 0x65, 0x59, 0xc1, 0x39, 0x89, 0xdb, 0xe1, 0x41, 0x91,
|
||||||
0x29, 0x16, 0x4a, 0x24, 0x2f, 0x4a, 0xcf, 0x14, 0xe3, 0x11, 0xdc, 0xdc, 0xc6, 0xec, 0x5c, 0x66,
|
0x81, 0xd7, 0x89, 0x8a, 0xe4, 0x96, 0x29, 0x22, 0x71, 0x3b, 0x5c, 0xdf, 0x84, 0xfc, 0x84, 0xec,
|
||||||
0x1b, 0x7f, 0x9c, 0x60, 0xca, 0x16, 0xdd, 0xcb, 0xd8, 0x87, 0xfa, 0x45, 0x38, 0x0d, 0x49, 0x40,
|
0xe8, 0x1f, 0x48, 0xf7, 0xc9, 0x28, 0x9c, 0x2c, 0x0e, 0x8e, 0xc1, 0x00, 0x86, 0xa6, 0x3d, 0x20,
|
||||||
0x31, 0x5a, 0x85, 0xea, 0xf9, 0xe1, 0x44, 0x5b, 0xad, 0xab, 0x2d, 0x3a, 0x65, 0xaf, 0xc2, 0x77,
|
0x5a, 0x2a, 0x1c, 0x80, 0x08, 0xb6, 0x52, 0x1b, 0x8a, 0x7e, 0x0c, 0x59, 0x29, 0x24, 0x42, 0x90,
|
||||||
0xb0, 0xd3, 0x06, 0xc3, 0x82, 0x5b, 0xbb, 0x1e, 0x4d, 0x69, 0x69, 0x3a, 0x86, 0x3a, 0xf4, 0x7c,
|
0x71, 0x4d, 0x87, 0x48, 0x9c, 0x38, 0x23, 0x03, 0xb2, 0xd4, 0xe3, 0x16, 0x75, 0x99, 0x80, 0x2e,
|
||||||
0x26, 0x39, 0xab, 0xb6, 0xcc, 0x8c, 0xf7, 0xd0, 0xc8, 0x36, 0xc8, 0x41, 0x5e, 0x01, 0xa4, 0xaf,
|
0x92, 0x35, 0x4a, 0xaa, 0x3e, 0x82, 0x7f, 0x0f, 0x09, 0xbf, 0x19, 0x12, 0x26, 0x1f, 0x07, 0x84,
|
||||||
0x9c, 0xa6, 0x08, 0x53, 0x5d, 0x36, 0xc9, 0x5c, 0x87, 0xf1, 0x0e, 0x1a, 0xeb, 0xe2, 0x38, 0x39,
|
0xf1, 0x45, 0x56, 0xab, 0x9e, 0x41, 0xe1, 0xd7, 0x74, 0xe6, 0x51, 0x97, 0x11, 0x74, 0x02, 0xb9,
|
||||||
0x49, 0xfe, 0x6f, 0xc5, 0x03, 0xb8, 0x9d, 0xe3, 0xbd, 0x12, 0xed, 0xbe, 0x28, 0xd0, 0x78, 0x2b,
|
0x9b, 0xb1, 0x0b, 0x58, 0xbe, 0xf1, 0x70, 0x19, 0x73, 0x34, 0x33, 0x81, 0x4c, 0x38, 0x2e, 0x52,
|
||||||
0x9c, 0x70, 0xb5, 0x13, 0xa3, 0x97, 0x50, 0x8b, 0x1d, 0x26, 0x3e, 0x1f, 0xc2, 0x39, 0x45, 0xd6,
|
0x5d, 0x87, 0xff, 0x8e, 0x2c, 0x16, 0xb7, 0x62, 0x11, 0x35, 0x0d, 0xb2, 0x5d, 0xcb, 0xe6, 0xc4,
|
||||||
0xdc, 0xe2, 0x5f, 0x98, 0xd7, 0x0e, 0x3d, 0xb2, 0xa5, 0x91, 0x79, 0xcc, 0xd7, 0xcd, 0x0d, 0x75,
|
0x67, 0x9a, 0x52, 0x49, 0xd7, 0x72, 0x38, 0x0a, 0xab, 0x36, 0x14, 0xa7, 0x21, 0x92, 0x1e, 0x06,
|
||||||
0x25, 0xeb, 0x3e, 0x86, 0xc6, 0x06, 0xf6, 0x71, 0xc1, 0xb6, 0x0b, 0x2c, 0xdb, 0xfd, 0x56, 0x06,
|
0x88, 0x1b, 0x0b, 0xd8, 0xed, 0xf8, 0x4d, 0x54, 0xa9, 0x7e, 0x80, 0xe2, 0xae, 0x70, 0xc5, 0x8c,
|
||||||
0x48, 0x8d, 0x82, 0xde, 0x40, 0x79, 0x1b, 0x33, 0x64, 0x64, 0x7e, 0xb2, 0xe0, 0x25, 0xd0, 0xef,
|
0x78, 0x7f, 0x5f, 0x8c, 0x3e, 0xfc, 0x3f, 0xd3, 0xeb, 0xce, 0x94, 0xff, 0xa6, 0x40, 0xf1, 0x8d,
|
||||||
0xfd, 0x11, 0x23, 0xd7, 0xd9, 0x83, 0x0a, 0xb7, 0x22, 0xca, 0x7e, 0xb9, 0x0a, 0x0d, 0xad, 0x3f,
|
0xb0, 0xea, 0xdd, 0x3f, 0x19, 0xda, 0x86, 0x7c, 0xb8, 0x16, 0xe2, 0xa5, 0x2a, 0x3d, 0x3b, 0xbb,
|
||||||
0xb8, 0x04, 0x25, 0x49, 0x0f, 0x40, 0x8d, 0xdd, 0x82, 0xb2, 0x0d, 0xc5, 0xe6, 0xd4, 0x1f, 0x5e,
|
0x4f, 0x07, 0xc1, 0x7b, 0xf7, 0xd8, 0x64, 0x7d, 0x2c, 0xb7, 0x2f, 0x38, 0x07, 0xb2, 0xcc, 0x10,
|
||||||
0x06, 0x4b, 0x89, 0xe3, 0xbb, 0xe4, 0x88, 0x8b, 0x3d, 0x94, 0x23, 0x5e, 0x74, 0xd5, 0x6d, 0x50,
|
0xbd, 0x33, 0x59, 0x1e, 0x43, 0x71, 0x8f, 0xd8, 0x64, 0x8e, 0x2a, 0x0b, 0x96, 0xa5, 0x71, 0x95,
|
||||||
0xe3, 0xbb, 0xe4, 0x88, 0x8b, 0xcf, 0xa5, 0x37, 0x72, 0x4e, 0xda, 0xe4, 0x7f, 0x64, 0x3d, 0xed,
|
0x01, 0x88, 0xcd, 0x88, 0x86, 0x90, 0x3e, 0x24, 0x1c, 0x3d, 0x4b, 0x40, 0x63, 0xce, 0x4a, 0xea,
|
||||||
0xe4, 0xac, 0xb9, 0xf4, 0xe3, 0xac, 0xb9, 0xf4, 0x69, 0xd6, 0x54, 0x4e, 0x66, 0x4d, 0xe5, 0xfb,
|
0xcf, 0x97, 0xc6, 0x49, 0x29, 0x3e, 0x41, 0x26, 0x58, 0x0b, 0x94, 0xe4, 0x6f, 0x61, 0xee, 0xca,
|
||||||
0xac, 0xa9, 0xfc, 0x9a, 0x35, 0x95, 0xbe, 0x2a, 0x90, 0x4f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff,
|
0xe9, 0x9b, 0xb7, 0x40, 0xca, 0xe6, 0x9f, 0x41, 0x0d, 0x9d, 0x8b, 0x92, 0x14, 0x99, 0xbf, 0x50,
|
||||||
0xc1, 0x18, 0xeb, 0x02, 0x8d, 0x07, 0x00, 0x00,
|
0xfa, 0xd6, 0x6d, 0xa0, 0x31, 0x81, 0xd0, 0x23, 0x89, 0x08, 0xcc, 0xf7, 0x7d, 0x22, 0x02, 0x8b,
|
||||||
|
0x9c, 0xf8, 0x0e, 0xd4, 0xd0, 0x37, 0x89, 0x08, 0xcc, 0xb7, 0x98, 0x5e, 0x9c, 0xd9, 0x88, 0xfd,
|
||||||
|
0xe0, 0x33, 0xa5, 0xf9, 0xfe, 0xfc, 0xba, 0xb4, 0xf2, 0xe3, 0xba, 0xb4, 0xf2, 0x65, 0x5c, 0x52,
|
||||||
|
0xce, 0xc7, 0x25, 0xe5, 0xfb, 0xb8, 0xa4, 0x5c, 0x8d, 0x4b, 0xca, 0xdb, 0x83, 0x3f, 0xf8, 0xf2,
|
||||||
|
0xda, 0x8e, 0xa3, 0x96, 0x2a, 0x3a, 0x3e, 0xf9, 0x19, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xaf, 0xe2,
|
||||||
|
0x52, 0xca, 0x09, 0x00, 0x00,
|
||||||
}
|
}
|
@@ -1,19 +1,21 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.containers.v1;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
-import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
 
+option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
+
 // Containers provides metadata storage for containers used in the execution
 // service.
 //
 // The objects here provide an state-independent view of containers for use in
-// management and resource pinning. From that perspective, contaienrs do not
+// management and resource pinning. From that perspective, containers do not
 // have a "state" but rather this is the set of resources that will be
 // considered in use by the container.
 //
@@ -51,11 +53,20 @@ message Container {
 	// If this field is updated, the spec and rootfs needed to updated, as well.
 	string image = 3;
 
+	message Runtime {
+		// Name is the name of the runtime.
+		string name = 1;
+		// Options specify additional runtime initialization options.
+		google.protobuf.Any options = 2;
+	}
 	// Runtime specifies which runtime to use for executing this container.
-	string runtime = 4;
+	Runtime runtime = 4;
 
 	// Spec to be used when creating the container. This is runtime specific.
-	google.protobuf.Any spec = 6;
+	google.protobuf.Any spec = 5;
 
+	// Snapshotter specifies the snapshotter name used for rootfs
+	string snapshotter = 6;
+
 	// RootFS specifies the snapshot key to use for the container's root
 	// filesystem. When starting a task from this container, a caller should
@@ -67,7 +78,10 @@ message Container {
 	// This field may be updated.
 	string rootfs = 7 [(gogoproto.customname) = "RootFS"];
 
+	// CreatedAt is the time the container was first created.
 	google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 
+	// UpdatedAt is the last time the container was mutated.
 	google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 }
 
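On the Go side this schema change turns the runtime field into a small Container_Runtime struct with an optional Any payload, next to the new snapshotter string. A rough sketch of filling in the new fields (all values are illustrative; the import path is the go_package registered by this diff):

```go
package main

import (
	"fmt"

	containers "github.com/containerd/containerd/api/services/containers/v1"
)

func main() {
	// Runtime is now a message (name plus optional Any options) rather than a
	// plain string, and the snapshotter backing the rootfs is recorded
	// explicitly. All values below are made up for illustration.
	c := containers.Container{
		ID:    "example",
		Image: "docker.io/library/redis:alpine",
		Runtime: &containers.Container_Runtime{
			Name: "io.containerd.runtime.v1.linux",
		},
		Snapshotter: "overlayfs",
		RootFS:      "example-rootfs-snapshot",
	}
	fmt.Println(c.String())
}
```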
@@ -80,7 +94,17 @@ message GetContainerResponse {
 }
 
 message ListContainersRequest {
-	string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
 }
 
 message ListContainersResponse {
File diff suppressed because it is too large
@@ -1,11 +1,14 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.content.v1;
 
 import "gogoproto/gogo.proto";
+import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/empty.proto";
 
+option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
+
 // Content provides access to a content addressable storage system.
 service Content {
 	// Info returns information about a committed object.
@@ -14,6 +17,13 @@ service Content {
 	// existence.
 	rpc Info(InfoRequest) returns (InfoResponse);
 
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	rpc Update(UpdateRequest) returns (UpdateResponse);
+
 	// List streams the entire set of content as Info objects and closes the
 	// stream.
 	//
@@ -28,15 +38,18 @@ service Content {
 	// Read allows one to read an object based on the offset into the content.
 	//
 	// The requested data may be returned in one or more messages.
-	rpc Read(ReadRequest) returns (stream ReadResponse);
+	rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
 
-	// Status returns the status of ongoing object ingestions, started via
+	// Status returns the status for a single reference.
+	rpc Status(StatusRequest) returns (StatusResponse);
+
+	// ListStatuses returns the status of ongoing object ingestions, started via
 	// Write.
 	//
 	// Only those matching the regular expression will be provided in the
 	// response. If the provided regular expression is empty, all ingestions
 	// will be provided.
-	rpc Status(StatusRequest) returns (StatusResponse);
+	rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
 
 	// Write begins or resumes writes to a resource identified by a unique ref.
 	// Only one active stream may exist at a time for each ref.
@@ -54,7 +67,7 @@ service Content {
 	//
 	// When completed, the commit flag should be set to true. If expected size
 	// or digest is set, the content will be validated against those values.
-	rpc Write(stream WriteRequest) returns (stream WriteResponse);
+	rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
 
 	// Abort cancels the ongoing write named in the request. Any resources
 	// associated with the write will be collected.
@@ -68,8 +81,14 @@ message Info {
 	// Size is the total number of bytes in the blob.
 	int64 size = 2;
 
-	// CommittedAt provides the time at which the blob was committed.
-	google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	// CreatedAt provides the time at which the blob was committed.
+	google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the time the info was last updated.
+	google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Labels are arbitrary data on content.
+	map<string, string> labels = 5;
 }
 
 message InfoRequest {
@@ -80,7 +99,35 @@ message InfoResponse {
 	Info info = 1 [(gogoproto.nullable) = false];
 }
 
-message ListContentRequest {}
+message UpdateRequest {
+	Info info = 1 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListContentRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
 
 message ListContentResponse {
 	repeated Info info = 1 [(gogoproto.nullable) = false];
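The new Update call pairs an Info with a google.protobuf.FieldMask so a caller can touch only the mutable labels. A sketch of building such a request, assuming the FieldMask type comes from the gogo types package used by this vendoring and using an illustrative label key:

```go
package example

import (
	content "github.com/containerd/containerd/api/services/content/v1"
	"github.com/gogo/protobuf/types"
	digest "github.com/opencontainers/go-digest"
)

// labelUpdate builds an UpdateRequest that rewrites only the labels map;
// digest, size and created_at stay untouched because they are immutable.
// The label key is illustrative, and the FieldMask import path is assumed.
func labelUpdate(dgst digest.Digest) *content.UpdateRequest {
	return &content.UpdateRequest{
		Info: content.Info{
			Digest: dgst,
			Labels: map[string]string{"example/pin": "true"},
		},
		// Without a mask, every mutable field would be replaced.
		UpdateMask: &types.FieldMask{Paths: []string{"labels"}},
	}
}
```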
@@ -91,9 +138,9 @@ message DeleteContentRequest {
 	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 }
 
-// ReadRequest defines the fields that make up a request to read a portion of
+// ReadContentRequest defines the fields that make up a request to read a portion of
 // data from a stored object.
-message ReadRequest {
+message ReadContentRequest {
 	// Digest is the hash identity to read.
 	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 
@@ -107,16 +154,12 @@ message ReadRequest {
 	int64 size = 3;
 }
 
-// ReadResponse carries byte data for a read request.
-message ReadResponse {
+// ReadContentResponse carries byte data for a read request.
+message ReadContentResponse {
 	int64 offset = 1; // offset of the returned data
 	bytes data = 2; // actual data
 }
 
-message StatusRequest {
-	string regexp = 1;
-}
-
 message Status {
 	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
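Read stays a server-streaming call after the rename: the client sends one ReadContentRequest and drains ReadContentResponse chunks until io.EOF. A sketch of that loop (the content client constructor name is assumed from the generated pattern; the request and response fields come from this diff):

```go
package example

import (
	"context"
	"io"

	content "github.com/containerd/containerd/api/services/content/v1"
	digest "github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

// readBlob fetches a whole blob by digest, concatenating the streamed chunks.
// NewContentClient and the Read method name are assumed generated names.
func readBlob(ctx context.Context, conn *grpc.ClientConn, dgst digest.Digest) ([]byte, error) {
	client := content.NewContentClient(conn)
	stream, err := client.Read(ctx, &content.ReadContentRequest{Digest: dgst})
	if err != nil {
		return nil, err
	}
	var buf []byte
	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			return buf, nil
		}
		if err != nil {
			return nil, err
		}
		buf = append(buf, msg.Data...) // each message carries an offset and a data chunk
	}
}
```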
@@ -126,7 +169,20 @@ message Status {
 	string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 }
 
+
+message StatusRequest {
+	string ref = 1;
+}
+
 message StatusResponse {
+	Status status = 1;
+}
+
+message ListStatusesRequest {
+	repeated string filters = 1;
+}
+
+message ListStatusesResponse {
 	repeated Status statuses = 1 [(gogoproto.nullable) = false];
 }
 
@@ -157,8 +213,8 @@ enum WriteAction {
 	COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
 }
 
-// WriteRequest writes data to the request ref at offset.
-message WriteRequest {
+// WriteContentRequest writes data to the request ref at offset.
+message WriteContentRequest {
 	// Action sets the behavior of the write.
 	//
 	// When this is a write and the ref is not yet allocated, the ref will be
@@ -215,8 +271,8 @@ message WriteRequest {
 	bytes data = 6;
 }
 
-// WriteResponse is returned on the culmination of a write call.
-message WriteResponse {
+// WriteContentResponse is returned on the culmination of a write call.
+message WriteContentResponse {
 	// Action contains the action for the final message of the stream. A writer
 	// should confirm that they match the intended result.
 	WriteAction action = 1;
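The commit step of a write can carry an expected size and digest that the store verifies before the content becomes visible. Computing that digest up front with the vendored go-digest package looks like this:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	data := []byte("hello containerd")
	// This is the value a writer would pass as the expected digest on commit;
	// the store rejects the write if the committed bytes hash differently.
	fmt.Println(digest.FromBytes(data))
}
```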
@@ -1,12 +1,12 @@
 // Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/diff/diff.proto
+// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
 // DO NOT EDIT!
 
 /*
 	Package diff is a generated protocol buffer package.
 
 	It is generated from these files:
-		github.com/containerd/containerd/api/services/diff/diff.proto
+		github.com/containerd/containerd/api/services/diff/v1/diff.proto
 
 	It has these top-level messages:
 		ApplyRequest
@@ -22,8 +22,8 @@ import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/golang/protobuf/ptypes/empty"
 import _ "github.com/gogo/protobuf/types"
-import containerd_v1_types "github.com/containerd/containerd/api/types/mount"
-import containerd_v1_types1 "github.com/containerd/containerd/api/types/descriptor"
+import containerd_types "github.com/containerd/containerd/api/types"
+import containerd_types1 "github.com/containerd/containerd/api/types"
 
 import (
 	context "golang.org/x/net/context"
@@ -48,8 +48,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type ApplyRequest struct {
 	// Diff is the descriptor of the diff to be extracted
-	Diff   *containerd_v1_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
-	Mounts []*containerd_v1_types.Mount     `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
+	Diff   *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
+	Mounts []*containerd_types.Mount     `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
 }
 
 func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
@@ -60,7 +60,7 @@ type ApplyResponse struct {
 	// Applied is the descriptor for the object which was applied.
 	// If the input was a compressed blob then the result will be
 	// the descriptor for the uncompressed blob.
-	Applied *containerd_v1_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
+	Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
 }
 
 func (m *ApplyResponse) Reset() { *m = ApplyResponse{} }
@@ -70,16 +70,16 @@ func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff,
 type DiffRequest struct {
 	// Left are the mounts which represent the older copy
 	// in which is the base of the computed changes.
-	Left []*containerd_v1_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
+	Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
 	// Right are the mounts which represents the newer copy
 	// in which changes from the left were made into.
-	Right []*containerd_v1_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
+	Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
 	// MediaType is the media type descriptor for the created diff
 	// object
 	MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
 	// Ref identifies the pre-commit content store object. This
 	// reference can be used to get the status from the content store.
-	Ref string `protobuf:"bytes,5,opt,name=ref,proto3" json:"ref,omitempty"`
+	Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
 }
 
 func (m *DiffRequest) Reset() { *m = DiffRequest{} }
@@ -88,7 +88,7 @@ func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []
 
 type DiffResponse struct {
 	// Diff is the descriptor of the diff which can be applied
-	Diff *containerd_v1_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
+	Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
 }
 
 func (m *DiffResponse) Reset() { *m = DiffResponse{} }
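Moving ref from field 5 to field 4 changes exactly one byte on the wire, because a protobuf key is (field_number << 3) | wire_type; that is the dAtA[i] = 0x2a to 0x22 change in the Marshal hunk further down. A quick check of both key bytes:

```go
package main

import "fmt"

func main() {
	const lengthDelimited = 2
	oldKey := 5<<3 | lengthDelimited // ref as field 5
	newKey := 4<<3 | lengthDelimited // ref as field 4
	fmt.Printf("%#x %#x\n", oldKey, newKey) // 0x2a 0x22
}
```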
@ -96,10 +96,10 @@ func (*DiffResponse) ProtoMessage() {}
|
|||||||
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*ApplyRequest)(nil), "containerd.v1.ApplyRequest")
|
proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
|
||||||
proto.RegisterType((*ApplyResponse)(nil), "containerd.v1.ApplyResponse")
|
proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
|
||||||
proto.RegisterType((*DiffRequest)(nil), "containerd.v1.DiffRequest")
|
proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
|
||||||
proto.RegisterType((*DiffResponse)(nil), "containerd.v1.DiffResponse")
|
proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -132,7 +132,7 @@ func NewDiffClient(cc *grpc.ClientConn) DiffClient {
|
|||||||
|
|
||||||
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
||||||
out := new(ApplyResponse)
|
out := new(ApplyResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Diff/Apply", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -141,7 +141,7 @@ func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.C
|
|||||||
|
|
||||||
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
|
||||||
out := new(DiffResponse)
|
out := new(DiffResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Diff/Diff", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -174,7 +174,7 @@ func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interfac
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Diff/Apply",
|
FullMethod: "/containerd.services.diff.v1.Diff/Apply",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest))
|
return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest))
|
||||||
@ -192,7 +192,7 @@ func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Diff/Diff",
|
FullMethod: "/containerd.services.diff.v1.Diff/Diff",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(DiffServer).Diff(ctx, req.(*DiffRequest))
|
return srv.(DiffServer).Diff(ctx, req.(*DiffRequest))
|
||||||
@ -201,7 +201,7 @@ func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Diff_serviceDesc = grpc.ServiceDesc{
|
var _Diff_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.Diff",
|
ServiceName: "containerd.services.diff.v1.Diff",
|
||||||
HandlerType: (*DiffServer)(nil),
|
HandlerType: (*DiffServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -214,7 +214,7 @@ var _Diff_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/diff/diff.proto",
|
Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
|
func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
|
||||||
@ -331,7 +331,7 @@ func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
|
|||||||
i += copy(dAtA[i:], m.MediaType)
|
i += copy(dAtA[i:], m.MediaType)
|
||||||
}
|
}
|
||||||
if len(m.Ref) > 0 {
|
if len(m.Ref) > 0 {
|
||||||
dAtA[i] = 0x2a
|
dAtA[i] = 0x22
|
||||||
i++
|
i++
|
||||||
i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
|
i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
|
||||||
i += copy(dAtA[i:], m.Ref)
|
i += copy(dAtA[i:], m.Ref)
|
||||||
@ -474,8 +474,8 @@ func (this *ApplyRequest) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&ApplyRequest{`,
|
s := strings.Join([]string{`&ApplyRequest{`,
|
||||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -485,7 +485,7 @@ func (this *ApplyResponse) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&ApplyResponse{`,
|
s := strings.Join([]string{`&ApplyResponse{`,
|
||||||
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -495,8 +495,8 @@ func (this *DiffRequest) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&DiffRequest{`,
|
s := strings.Join([]string{`&DiffRequest{`,
|
||||||
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_v1_types.Mount", 1) + `,`,
|
`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
|
||||||
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
||||||
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
@ -508,7 +508,7 @@ func (this *DiffResponse) String() string {
|
|||||||
return "nil"
|
return "nil"
|
||||||
}
|
}
|
||||||
s := strings.Join([]string{`&DiffResponse{`,
|
s := strings.Join([]string{`&DiffResponse{`,
|
||||||
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_v1_types1.Descriptor", 1) + `,`,
|
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||||
`}`,
|
`}`,
|
||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
@ -577,7 +577,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if m.Diff == nil {
|
if m.Diff == nil {
|
||||||
m.Diff = &containerd_v1_types1.Descriptor{}
|
m.Diff = &containerd_types1.Descriptor{}
|
||||||
}
|
}
|
||||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -609,7 +609,7 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Mounts = append(m.Mounts, &containerd_v1_types.Mount{})
|
m.Mounts = append(m.Mounts, &containerd_types.Mount{})
|
||||||
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -691,7 +691,7 @@ func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if m.Applied == nil {
|
if m.Applied == nil {
|
||||||
m.Applied = &containerd_v1_types1.Descriptor{}
|
m.Applied = &containerd_types1.Descriptor{}
|
||||||
}
|
}
|
||||||
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -773,7 +773,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Left = append(m.Left, &containerd_v1_types.Mount{})
|
m.Left = append(m.Left, &containerd_types.Mount{})
|
||||||
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -804,7 +804,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
if postIndex > l {
|
if postIndex > l {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
m.Right = append(m.Right, &containerd_v1_types.Mount{})
|
m.Right = append(m.Right, &containerd_types.Mount{})
|
||||||
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -838,7 +838,7 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
m.MediaType = string(dAtA[iNdEx:postIndex])
|
m.MediaType = string(dAtA[iNdEx:postIndex])
|
||||||
iNdEx = postIndex
|
iNdEx = postIndex
|
||||||
case 5:
|
case 4:
|
||||||
if wireType != 2 {
|
if wireType != 2 {
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
|
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
|
||||||
}
|
}
|
||||||
@ -944,7 +944,7 @@ func (m *DiffResponse) Unmarshal(dAtA []byte) error {
|
|||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if m.Diff == nil {
|
if m.Diff == nil {
|
||||||
m.Diff = &containerd_v1_types1.Descriptor{}
|
m.Diff = &containerd_types1.Descriptor{}
|
||||||
}
|
}
|
||||||
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -1077,35 +1077,36 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/diff.proto", fileDescriptorDiff)
|
proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorDiff = []byte{
|
var fileDescriptorDiff = []byte{
|
||||||
// 407 bytes of a gzipped FileDescriptorProto
|
// 427 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0xcf, 0x93, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x31, 0x6f, 0xd4, 0x30,
|
||||||
0x10, 0xfd, 0x56, 0xda, 0xcf, 0x74, 0xdb, 0x26, 0x66, 0xe3, 0x81, 0x50, 0xa5, 0x0d, 0xa7, 0x9e,
|
0x14, 0xc7, 0x6b, 0xee, 0x5a, 0x54, 0x5f, 0x91, 0x90, 0x85, 0x44, 0x94, 0x42, 0x38, 0x65, 0x4a,
|
||||||
0x40, 0xe9, 0xc9, 0x44, 0x63, 0xac, 0x8d, 0x07, 0x13, 0x2f, 0xc4, 0xbb, 0xa1, 0x30, 0xd0, 0x4d,
|
0x41, 0xd8, 0xf4, 0x90, 0x3a, 0xd0, 0xa5, 0xa0, 0x4a, 0x4c, 0x2c, 0x51, 0x27, 0x90, 0x40, 0xb9,
|
||||||
0x80, 0x5d, 0xd9, 0xa5, 0x86, 0x9b, 0x77, 0xff, 0x84, 0x3f, 0xa7, 0x47, 0x8f, 0x1e, 0x2d, 0xbf,
|
0xe4, 0x25, 0xb5, 0x94, 0xc4, 0x6e, 0xec, 0x9c, 0x94, 0x8d, 0xcf, 0xc1, 0xd7, 0x61, 0xe9, 0xc8,
|
||||||
0xc4, 0xb0, 0x6c, 0x15, 0x9b, 0x26, 0xb6, 0x97, 0xcd, 0xb0, 0xef, 0xcd, 0x9b, 0x37, 0x8f, 0xc5,
|
0xc8, 0x48, 0xf3, 0x49, 0x50, 0x1c, 0x07, 0x22, 0x90, 0x8e, 0xd0, 0xc9, 0x2f, 0x7e, 0xbf, 0xff,
|
||||||
0xaf, 0x52, 0x2a, 0x77, 0xd5, 0xd6, 0x8d, 0x58, 0xee, 0x45, 0xac, 0x90, 0x21, 0x2d, 0xa0, 0x8c,
|
0x7b, 0x7f, 0xbf, 0xd8, 0xf8, 0x2c, 0xe3, 0xfa, 0xb2, 0x5e, 0xd3, 0x58, 0x14, 0x2c, 0x16, 0xa5,
|
||||||
0xfb, 0x65, 0xc8, 0xa9, 0x27, 0xa0, 0xdc, 0xd3, 0x08, 0x84, 0x17, 0xd3, 0x24, 0x51, 0x87, 0xcb,
|
0x8e, 0x78, 0x09, 0x55, 0x32, 0x0e, 0x23, 0xc9, 0x99, 0x82, 0x6a, 0xc3, 0x63, 0x50, 0x2c, 0xe1,
|
||||||
0x4b, 0x26, 0x19, 0x99, 0xfe, 0x25, 0xba, 0xfb, 0xe7, 0xd6, 0xe3, 0x94, 0xa5, 0x4c, 0x21, 0x5e,
|
0x69, 0xca, 0x36, 0xc7, 0x66, 0xa5, 0xb2, 0x12, 0x5a, 0x90, 0xc3, 0xdf, 0x2c, 0x1d, 0x38, 0x6a,
|
||||||
0x5b, 0x75, 0x24, 0x6b, 0x96, 0x32, 0x96, 0x66, 0xe0, 0xa9, 0xaf, 0x6d, 0x95, 0x78, 0x90, 0x73,
|
0xf2, 0x9b, 0x63, 0xf7, 0x41, 0x26, 0x32, 0x61, 0x38, 0xd6, 0x45, 0xbd, 0xc4, 0x3d, 0xcc, 0x84,
|
||||||
0x59, 0x6b, 0x70, 0x7e, 0x0e, 0x4a, 0x9a, 0x83, 0x90, 0x61, 0xce, 0x35, 0xe1, 0xe5, 0x55, 0x0e,
|
0xc8, 0x72, 0x60, 0xe6, 0x6b, 0x5d, 0xa7, 0x0c, 0x0a, 0xa9, 0x1b, 0x9b, 0x7c, 0xf2, 0x67, 0x52,
|
||||||
0x65, 0xcd, 0x41, 0x78, 0x39, 0xab, 0x0a, 0xd9, 0x9d, 0xba, 0xfb, 0xdd, 0x0d, 0xdd, 0x31, 0x88,
|
0xf3, 0x02, 0x94, 0x8e, 0x0a, 0x69, 0x81, 0x93, 0x49, 0x96, 0x75, 0x23, 0x41, 0xb1, 0x42, 0xd4,
|
||||||
0xa8, 0xa4, 0x5c, 0xb2, 0xb2, 0x57, 0x76, 0x3a, 0xce, 0x17, 0x3c, 0x79, 0xc3, 0x79, 0x56, 0x07,
|
0xa5, 0xb6, 0xba, 0xd3, 0xff, 0xd0, 0x25, 0xa0, 0xe2, 0x8a, 0x4b, 0x2d, 0xaa, 0x5e, 0xec, 0x5f,
|
||||||
0xf0, 0xb9, 0x02, 0x21, 0xc9, 0x0a, 0x0f, 0xda, 0x18, 0x4c, 0xb4, 0x40, 0xcb, 0xb1, 0x3f, 0x77,
|
0xe1, 0x83, 0xd7, 0x52, 0xe6, 0x4d, 0x08, 0x57, 0x35, 0x28, 0x4d, 0x5e, 0xe0, 0x79, 0x77, 0x46,
|
||||||
0xff, 0xc9, 0xc1, 0x55, 0x7a, 0xee, 0xe6, 0x8f, 0x48, 0xa0, 0xc8, 0xc4, 0xc7, 0xf7, 0xca, 0x9b,
|
0x07, 0x2d, 0x51, 0xb0, 0x58, 0x3d, 0xa2, 0xa3, 0x21, 0x98, 0x0a, 0xf4, 0xfc, 0x57, 0x85, 0xd0,
|
||||||
0x30, 0x1f, 0x2c, 0x8c, 0xe5, 0xd8, 0xb7, 0x2e, 0xb6, 0x7d, 0x68, 0x29, 0x81, 0x66, 0x3a, 0xef,
|
0x90, 0x84, 0xe1, 0x3d, 0xe3, 0x46, 0x39, 0x77, 0x96, 0xb3, 0x60, 0xb1, 0x7a, 0xf8, 0xb7, 0xe6,
|
||||||
0xf1, 0x54, 0x0f, 0x16, 0x9c, 0x15, 0x02, 0xc8, 0x0b, 0xfc, 0x30, 0xe4, 0x3c, 0xa3, 0x10, 0x5f,
|
0x5d, 0x97, 0x0f, 0x2d, 0xe6, 0xbf, 0xc5, 0xf7, 0x6c, 0x4b, 0x25, 0x45, 0xa9, 0x80, 0x9c, 0xe0,
|
||||||
0x3b, 0xfc, 0xc4, 0x77, 0xbe, 0x23, 0x3c, 0xde, 0xd0, 0x24, 0x39, 0x2d, 0xe1, 0xe2, 0x41, 0x06,
|
0xbb, 0x91, 0x94, 0x39, 0x87, 0x64, 0x52, 0xdb, 0x01, 0xf6, 0xbf, 0x20, 0xbc, 0x38, 0xe7, 0x69,
|
||||||
0x89, 0x34, 0xd1, 0x7f, 0xdd, 0x28, 0x1e, 0x79, 0x86, 0x87, 0x25, 0x4d, 0x77, 0xf2, 0x0a, 0xfb,
|
0x3a, 0x78, 0x7f, 0x86, 0xe7, 0x39, 0xa4, 0xda, 0x41, 0xdb, 0x7d, 0x18, 0x88, 0x3c, 0xc7, 0xbb,
|
||||||
0x1d, 0x91, 0x3c, 0xc5, 0x38, 0x87, 0x98, 0x86, 0x9f, 0x5a, 0xcc, 0x34, 0x16, 0x68, 0x39, 0x0a,
|
0x15, 0xcf, 0x2e, 0xf5, 0xbf, 0x5c, 0xf7, 0x14, 0x79, 0x8c, 0x71, 0x01, 0x09, 0x8f, 0x3e, 0x75,
|
||||||
0x46, 0xea, 0xe6, 0x63, 0xcd, 0x81, 0x3c, 0xc2, 0x46, 0x09, 0x89, 0x39, 0x54, 0xf7, 0x6d, 0xe9,
|
0x39, 0x67, 0xb6, 0x44, 0xc1, 0x7e, 0xb8, 0x6f, 0x76, 0x2e, 0x1a, 0x09, 0xe4, 0x3e, 0x9e, 0x55,
|
||||||
0xbc, 0xc5, 0x93, 0xce, 0xa1, 0xde, 0xf6, 0x94, 0xb3, 0x71, 0x43, 0xce, 0xfe, 0x37, 0x84, 0x07,
|
0x90, 0x3a, 0x73, 0xb3, 0xdf, 0x85, 0xfe, 0x19, 0x3e, 0xe8, 0xbd, 0xd9, 0x43, 0x0e, 0x83, 0x9d,
|
||||||
0xad, 0x0a, 0x59, 0xe3, 0xa1, 0x0a, 0x8f, 0xcc, 0xce, 0x1a, 0xfb, 0xff, 0xd2, 0x7a, 0x72, 0x19,
|
0x4d, 0x1d, 0xec, 0xea, 0x2b, 0xc2, 0xf3, 0xae, 0x04, 0xf9, 0x88, 0x77, 0xcd, 0xc0, 0xc8, 0x11,
|
||||||
0xd4, 0x0e, 0x5e, 0x6b, 0xad, 0xf3, 0x6d, 0x7b, 0x41, 0x5a, 0xb3, 0x8b, 0x58, 0x27, 0xb0, 0x36,
|
0xdd, 0x72, 0x27, 0xe9, 0xf8, 0x3f, 0xba, 0x4f, 0xa7, 0xa0, 0xd6, 0xda, 0x07, 0xdb, 0x27, 0xd8,
|
||||||
0x0f, 0x47, 0xfb, 0xee, 0xe7, 0xd1, 0xbe, 0xfb, 0xda, 0xd8, 0xe8, 0xd0, 0xd8, 0xe8, 0x47, 0x63,
|
0xaa, 0x19, 0x4d, 0xda, 0x3d, 0x9a, 0x40, 0xf6, 0xc5, 0xdf, 0x5c, 0x5c, 0xdf, 0x78, 0x3b, 0xdf,
|
||||||
0xa3, 0x5f, 0x8d, 0x8d, 0xb6, 0xf7, 0xea, 0x6d, 0xad, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x61,
|
0x6f, 0xbc, 0x9d, 0xcf, 0xad, 0x87, 0xae, 0x5b, 0x0f, 0x7d, 0x6b, 0x3d, 0xf4, 0xa3, 0xf5, 0xd0,
|
||||||
0x65, 0x17, 0x47, 0x85, 0x03, 0x00, 0x00,
|
0xfb, 0x57, 0xb7, 0x7a, 0xa2, 0xa7, 0xdd, 0xba, 0xde, 0x33, 0xb7, 0xf7, 0xe5, 0xcf, 0x00, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x44, 0x8b, 0x75, 0x5d, 0xe7, 0x03, 0x00, 0x00,
|
||||||
}
|
}
|
@@ -1,12 +1,14 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.diff.v1;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
-import "github.com/containerd/containerd/api/types/mount/mount.proto";
-import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
 
+option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
+
 // Diff service creates and applies diffs
 service Diff {
@@ -22,26 +24,26 @@ service Diff {
 
 message ApplyRequest {
 	// Diff is the descriptor of the diff to be extracted
-	containerd.v1.types.Descriptor diff = 1;
+	containerd.types.Descriptor diff = 1;
 
-	repeated containerd.v1.types.Mount mounts = 2;
+	repeated containerd.types.Mount mounts = 2;
 }
 
 message ApplyResponse {
 	// Applied is the descriptor for the object which was applied.
 	// If the input was a compressed blob then the result will be
 	// the descriptor for the uncompressed blob.
-	containerd.v1.types.Descriptor applied = 1;
+	containerd.types.Descriptor applied = 1;
 }
 
 message DiffRequest {
 	// Left are the mounts which represent the older copy
 	// in which is the base of the computed changes.
-	repeated containerd.v1.types.Mount left = 1;
+	repeated containerd.types.Mount left = 1;
 
 	// Right are the mounts which represents the newer copy
 	// in which changes from the left were made into.
-	repeated containerd.v1.types.Mount right = 2;
+	repeated containerd.types.Mount right = 2;
 
 	// MediaType is the media type descriptor for the created diff
 	// object
@@ -49,10 +51,10 @@ message DiffRequest {
 
 	// Ref identifies the pre-commit content store object. This
 	// reference can be used to get the status from the content store.
-	string ref = 5;
+	string ref = 4;
 }
 
 message DiffResponse {
 	// Diff is the descriptor of the diff which can be applied
-	containerd.v1.types.Descriptor diff = 3;
+	containerd.types.Descriptor diff = 3;
 }
1161	vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go  (generated, vendored, new file)
File diff suppressed because it is too large.

29	vendor/github.com/containerd/containerd/api/services/events/v1/container.proto  (generated, vendored, new file)
@@ -0,0 +1,29 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message ContainerCreate {
	string id = 1;
	string image = 2;
	message Runtime {
		string name = 1;
		google.protobuf.Any options = 2;
	}
	Runtime runtime = 3;
}

message ContainerUpdate {
	string id = 1;
	string image = 2;
	map<string, string> labels = 3;
	string rootfs = 4 [(gogoproto.customname) = "RootFS"];
}

message ContainerDelete {
	string id = 1;
}
331	vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go  (generated, vendored, new file)
@@ -0,0 +1,331 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/content.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type ContentDelete struct {
|
||||||
|
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ContentDelete) Reset() { *m = ContentDelete{} }
|
||||||
|
func (*ContentDelete) ProtoMessage() {}
|
||||||
|
func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete")
|
||||||
|
}
|
||||||
|
func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Digest) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
|
||||||
|
i += copy(dAtA[i:], m.Digest)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
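
Note (not part of the vendored file): encodeVarintContent and sovContent above implement the standard protobuf base-128 varint rule, seven payload bits per byte with the high bit set on every byte except the last. The standalone sketch below is only an illustration of that rule; the function name putUvarint and the test value 300 are invented for the example.

package main

import "fmt"

// putUvarint writes v into buf using the same base-128 varint rule as the
// generated encodeVarint helpers: low 7 bits per byte, continuation bit 0x80.
func putUvarint(buf []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		buf[i] = byte(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = byte(v)
	return i + 1
}

func main() {
	buf := make([]byte, 10) // 10 bytes is enough for any uint64
	n := putUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02"
}
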
|
||||||
|
func (m *ContentDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Digest)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovContent(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovContent(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozContent(x uint64) (n int) {
|
||||||
|
return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *ContentDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ContentDelete{`,
|
||||||
|
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringContent(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *ContentDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipContent(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipContent(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthContent
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowContent
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipContent(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/content.proto", fileDescriptorContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorContent = []byte{
|
||||||
|
// 210 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15,
|
||||||
|
0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95,
|
||||||
|
0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x4a, 0xd1,
|
||||||
|
0x5c, 0xbc, 0xce, 0x10, 0x53, 0x5c, 0x52, 0x73, 0x52, 0x4b, 0x52, 0x85, 0xbc, 0xb8, 0xd8, 0x52,
|
||||||
|
0x32, 0xd3, 0x53, 0x8b, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0x8c, 0x4e, 0xdc, 0x93,
|
||||||
|
0x67, 0xb8, 0x75, 0x4f, 0x5e, 0x0b, 0xc9, 0x91, 0xf9, 0x05, 0xa9, 0x79, 0x70, 0xcb, 0x8a, 0xf5,
|
||||||
|
0xd3, 0xf3, 0x75, 0x21, 0x5a, 0xf4, 0x5c, 0xc0, 0x54, 0x10, 0xd4, 0x04, 0xa7, 0x88, 0x13, 0x0f,
|
||||||
|
0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85,
|
||||||
|
0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0xd9, 0x91, 0xe9, 0x65, 0x6b, 0x08, 0x2b, 0x89,
|
||||||
|
0x0d, 0xec, 0x7a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x41, 0x46, 0x06, 0x3b, 0x01,
|
||||||
|
0x00, 0x00,
|
||||||
|
}
|
11	vendor/github.com/containerd/containerd/api/services/events/v1/content.proto  (generated, vendored, new file)
@@ -0,0 +1,11 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message ContentDelete {
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
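
For orientation, a minimal sketch (not from the vendored sources) of how the generated ContentDelete type above might be populated and marshalled. It assumes the vendored package is imported as events; the digest value is a placeholder.

package main

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// ContentDelete carries only the digest of the deleted blob.
	ev := &events.ContentDelete{
		Digest: digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
	}

	// Marshal uses the generated protobuf marshalling code shown above.
	data, err := ev.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(data))
}
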
903	vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go  (generated, vendored, new file)
@@ -0,0 +1,903 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/events.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
import google_protobuf1 "github.com/gogo/protobuf/types"
|
||||||
|
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
|
||||||
|
import _ "github.com/gogo/protobuf/types"
|
||||||
|
|
||||||
|
import time "time"
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "golang.org/x/net/context"
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
var _ = time.Kitchen
|
||||||
|
|
||||||
|
type SubscribeRequest struct {
|
||||||
|
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} }
|
||||||
|
func (*SubscribeRequest) ProtoMessage() {}
|
||||||
|
func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
|
||||||
|
|
||||||
|
type PublishRequest struct {
|
||||||
|
Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Reset() { *m = PublishRequest{} }
|
||||||
|
func (*PublishRequest) ProtoMessage() {}
|
||||||
|
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
|
||||||
|
|
||||||
|
type Envelope struct {
|
||||||
|
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
|
||||||
|
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||||
|
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||||
|
Event *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Reset() { *m = Envelope{} }
|
||||||
|
func (*Envelope) ProtoMessage() {}
|
||||||
|
func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest")
|
||||||
|
proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
|
||||||
|
proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ context.Context
|
||||||
|
var _ grpc.ClientConn
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the grpc package it is being compiled against.
|
||||||
|
const _ = grpc.SupportPackageIsVersion4
|
||||||
|
|
||||||
|
// Client API for Events service
|
||||||
|
|
||||||
|
type EventsClient interface {
|
||||||
|
Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||||
|
Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsClient struct {
|
||||||
|
cc *grpc.ClientConn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEventsClient(cc *grpc.ClientConn) EventsClient {
|
||||||
|
return &eventsClient{cc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||||
|
out := new(google_protobuf2.Empty)
|
||||||
|
err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
|
||||||
|
stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
x := &eventsSubscribeClient{stream}
|
||||||
|
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := x.ClientStream.CloseSend(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Events_SubscribeClient interface {
|
||||||
|
Recv() (*Envelope, error)
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsSubscribeClient struct {
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
|
||||||
|
m := new(Envelope)
|
||||||
|
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server API for Events service
|
||||||
|
|
||||||
|
type EventsServer interface {
|
||||||
|
Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
|
||||||
|
Subscribe(*SubscribeRequest, Events_SubscribeServer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegisterEventsServer(s *grpc.Server, srv EventsServer) {
|
||||||
|
s.RegisterService(&_Events_serviceDesc, srv)
|
||||||
|
}
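
Note (not part of the vendored file): the EventsServer interface and RegisterEventsServer above follow the usual generated gRPC server pattern. The sketch below shows how a server might be wired up under those assumptions; the demoServer type, the no-op method bodies, and the listener address are invented for illustration.

package main

import (
	"context"
	"net"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

// demoServer is a toy implementation of the generated EventsServer interface.
type demoServer struct{}

func (s *demoServer) Publish(ctx context.Context, req *events.PublishRequest) (*empty.Empty, error) {
	// A real server would fan the envelope out to subscribers here.
	return &empty.Empty{}, nil
}

func (s *demoServer) Subscribe(req *events.SubscribeRequest, stream events.Events_SubscribeServer) error {
	// A real server would stream matching envelopes until the client disconnects.
	return nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	events.RegisterEventsServer(srv, &demoServer{})
	_ = srv.Serve(lis)
}
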
|
||||||
|
|
||||||
|
func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||||
|
in := new(PublishRequest)
|
||||||
|
if err := dec(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if interceptor == nil {
|
||||||
|
return srv.(EventsServer).Publish(ctx, in)
|
||||||
|
}
|
||||||
|
info := &grpc.UnaryServerInfo{
|
||||||
|
Server: srv,
|
||||||
|
FullMethod: "/containerd.services.events.v1.Events/Publish",
|
||||||
|
}
|
||||||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
|
return srv.(EventsServer).Publish(ctx, req.(*PublishRequest))
|
||||||
|
}
|
||||||
|
return interceptor(ctx, in, info, handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||||
|
m := new(SubscribeRequest)
|
||||||
|
if err := stream.RecvMsg(m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Events_SubscribeServer interface {
|
||||||
|
Send(*Envelope) error
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventsSubscribeServer struct {
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *eventsSubscribeServer) Send(m *Envelope) error {
|
||||||
|
return x.ServerStream.SendMsg(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _Events_serviceDesc = grpc.ServiceDesc{
|
||||||
|
ServiceName: "containerd.services.events.v1.Events",
|
||||||
|
HandlerType: (*EventsServer)(nil),
|
||||||
|
Methods: []grpc.MethodDesc{
|
||||||
|
{
|
||||||
|
MethodName: "Publish",
|
||||||
|
Handler: _Events_Publish_Handler,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Streams: []grpc.StreamDesc{
|
||||||
|
{
|
||||||
|
StreamName: "Subscribe",
|
||||||
|
Handler: _Events_Subscribe_Handler,
|
||||||
|
ServerStreams: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Filters) > 0 {
|
||||||
|
for _, s := range m.Filters {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
l = len(s)
|
||||||
|
for l >= 1<<7 {
|
||||||
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
|
l >>= 7
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
dAtA[i] = uint8(l)
|
||||||
|
i++
|
||||||
|
i += copy(dAtA[i:], s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Envelope != nil {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
|
||||||
|
n1, err := m.Envelope.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n1
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
||||||
|
n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n2
|
||||||
|
if len(m.Namespace) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
|
||||||
|
i += copy(dAtA[i:], m.Namespace)
|
||||||
|
}
|
||||||
|
if len(m.Topic) > 0 {
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
|
||||||
|
i += copy(dAtA[i:], m.Topic)
|
||||||
|
}
|
||||||
|
if m.Event != nil {
|
||||||
|
dAtA[i] = 0x22
|
||||||
|
i++
|
||||||
|
i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
|
||||||
|
n3, err := m.Event.MarshalTo(dAtA[i:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i += n3
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Events(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Events(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *SubscribeRequest) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Filters) > 0 {
|
||||||
|
for _, s := range m.Filters {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PublishRequest) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Envelope != nil {
|
||||||
|
l = m.Envelope.Size()
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Envelope) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
l = len(m.Namespace)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Topic)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
if m.Event != nil {
|
||||||
|
l = m.Event.Size()
|
||||||
|
n += 1 + l + sovEvents(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovEvents(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozEvents(x uint64) (n int) {
|
||||||
|
return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *SubscribeRequest) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SubscribeRequest{`,
|
||||||
|
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *PublishRequest) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&PublishRequest{`,
|
||||||
|
`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *Envelope) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Envelope{`,
|
||||||
|
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||||
|
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
|
||||||
|
`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
|
||||||
|
`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringEvents(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *PublishRequest) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Envelope == nil {
|
||||||
|
m.Envelope = &Envelope{}
|
||||||
|
}
|
||||||
|
if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Envelope) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Envelope: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Namespace = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Topic = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 4:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.Event == nil {
|
||||||
|
m.Event = &google_protobuf1.Any{}
|
||||||
|
}
|
||||||
|
if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEvents(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipEvents(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthEvents
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEvents
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipEvents(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorEvents = []byte{
|
||||||
|
// 407 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcd, 0x6e, 0xd3, 0x40,
|
||||||
|
0x10, 0xc7, 0xb3, 0x84, 0x7c, 0x78, 0x91, 0x10, 0x5a, 0x45, 0xc8, 0x18, 0x70, 0xa2, 0x5c, 0x88,
|
||||||
|
0x10, 0xec, 0x92, 0x70, 0x44, 0x42, 0x22, 0x90, 0x7b, 0x64, 0x40, 0x42, 0xdc, 0x6c, 0x77, 0xe2,
|
||||||
|
0xac, 0x64, 0x7b, 0x5d, 0xef, 0xda, 0x52, 0x6e, 0x7d, 0x84, 0x3e, 0x49, 0x5f, 0xa2, 0x97, 0x1c,
|
||||||
|
0x7b, 0xec, 0xa9, 0x6d, 0xfc, 0x24, 0x55, 0xfc, 0x91, 0xb4, 0x89, 0xd4, 0x54, 0xbd, 0xcd, 0xec,
|
||||||
|
0xff, 0x37, 0x3b, 0xfb, 0x9f, 0x59, 0xfc, 0xcb, 0xe3, 0x6a, 0x9e, 0x38, 0xd4, 0x15, 0x01, 0x73,
|
||||||
|
0x45, 0xa8, 0x6c, 0x1e, 0x42, 0x7c, 0x74, 0x37, 0xb4, 0x23, 0xce, 0x24, 0xc4, 0x29, 0x77, 0x41,
|
||||||
|
0x32, 0x48, 0x21, 0x54, 0x92, 0xa5, 0xc3, 0x32, 0xa2, 0x51, 0x2c, 0x94, 0x20, 0xef, 0xb7, 0x3c,
|
||||||
|
0xad, 0x58, 0x5a, 0x12, 0xe9, 0xd0, 0xe8, 0x78, 0xc2, 0x13, 0x39, 0xc9, 0xd6, 0x51, 0x51, 0x64,
|
||||||
|
0xbc, 0xf1, 0x84, 0xf0, 0x7c, 0x60, 0x79, 0xe6, 0x24, 0x33, 0x66, 0x87, 0x8b, 0x52, 0x7a, 0xbb,
|
||||||
|
0x2b, 0x41, 0x10, 0xa9, 0x4a, 0xec, 0xee, 0x8a, 0x8a, 0x07, 0x20, 0x95, 0x1d, 0x44, 0x05, 0xd0,
|
||||||
|
0xff, 0x84, 0x5f, 0xfd, 0x4e, 0x1c, 0xe9, 0xc6, 0xdc, 0x01, 0x0b, 0x8e, 0x13, 0x90, 0x8a, 0xe8,
|
||||||
|
0xb8, 0x35, 0xe3, 0xbe, 0x82, 0x58, 0xea, 0xa8, 0x57, 0x1f, 0x68, 0x56, 0x95, 0xf6, 0xff, 0xe2,
|
||||||
|
0x97, 0xd3, 0xc4, 0xf1, 0xb9, 0x9c, 0x57, 0xec, 0x4f, 0xdc, 0x86, 0x30, 0x05, 0x5f, 0x44, 0xa0,
|
||||||
|
0xa3, 0x1e, 0x1a, 0xbc, 0x18, 0x7d, 0xa0, 0x0f, 0x1a, 0xa4, 0x93, 0x12, 0xb7, 0x36, 0x85, 0xfd,
|
||||||
|
0x33, 0x84, 0xdb, 0xd5, 0x31, 0x19, 0x63, 0x6d, 0xf3, 0xc8, 0xf2, 0x4a, 0x83, 0x16, 0x36, 0x68,
|
||||||
|
0x65, 0x83, 0xfe, 0xa9, 0x88, 0x71, 0x7b, 0x79, 0xd5, 0xad, 0x9d, 0x5e, 0x77, 0x91, 0xb5, 0x2d,
|
||||||
|
0x23, 0xef, 0xb0, 0x16, 0xda, 0x01, 0xc8, 0xc8, 0x76, 0x41, 0x7f, 0xd6, 0x43, 0x03, 0xcd, 0xda,
|
||||||
|
0x1e, 0x90, 0x0e, 0x6e, 0x28, 0x11, 0x71, 0x57, 0xaf, 0xe7, 0x4a, 0x91, 0x90, 0x8f, 0xb8, 0x91,
|
||||||
|
0x3f, 0x52, 0x7f, 0x9e, 0xf7, 0xec, 0xec, 0xf5, 0xfc, 0x11, 0x2e, 0xac, 0x02, 0x19, 0x9d, 0x23,
|
||||||
|
0xdc, 0x9c, 0xe4, 0x8e, 0xc8, 0x14, 0xb7, 0xca, 0x91, 0x90, 0xcf, 0x07, 0x9c, 0xdf, 0x1f, 0x9d,
|
||||||
|
0xf1, 0x7a, 0xaf, 0xc3, 0x64, 0xbd, 0x39, 0xe2, 0x61, 0x6d, 0xb3, 0x12, 0xc2, 0x0e, 0xdc, 0xb9,
|
||||||
|
0xbb, 0x3c, 0xe3, 0xb1, 0xe3, 0xff, 0x82, 0xc6, 0xff, 0x96, 0x2b, 0xb3, 0x76, 0xb9, 0x32, 0x6b,
|
||||||
|
0x27, 0x99, 0x89, 0x96, 0x99, 0x89, 0x2e, 0x32, 0x13, 0xdd, 0x64, 0x26, 0xfa, 0xff, 0xfd, 0x89,
|
||||||
|
0x3f, 0xfd, 0x5b, 0x11, 0x39, 0xcd, 0xdc, 0xd2, 0xd7, 0xdb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13,
|
||||||
|
0x35, 0xd0, 0x60, 0x32, 0x03, 0x00, 0x00,
|
||||||
|
}
|
30	vendor/github.com/containerd/containerd/api/services/events/v1/events.proto  (generated, vendored, new file)
@@ -0,0 +1,30 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

service Events {
	rpc Publish(PublishRequest) returns (google.protobuf.Empty);
	rpc Subscribe(SubscribeRequest) returns (stream Envelope);
}

message SubscribeRequest {
	repeated string filters = 1;
}

message PublishRequest {
	Envelope envelope = 1;
}

message Envelope {
	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	string namespace = 2;
	string topic = 3;
	google.protobuf.Any event = 4;
}
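
To put the service definition above in context, here is a minimal, assumption-laden client sketch using the generated types from events.pb.go: it dials an arbitrary address, publishes one envelope, and reads once from the subscribe stream. The address, namespace, topic, filter string, and Any payload are placeholders, and error handling is reduced to panics.

package main

import (
	"context"
	"fmt"
	"time"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:10000", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := events.NewEventsClient(conn)
	ctx := context.Background()

	// Publish wraps the event payload in a google.protobuf.Any inside an Envelope.
	_, err = client.Publish(ctx, &events.PublishRequest{
		Envelope: &events.Envelope{
			Timestamp: time.Now(),
			Namespace: "example",
			Topic:     "/demo/topic",
			Event:     &types.Any{TypeUrl: "demo.Event", Value: []byte("payload")},
		},
	})
	if err != nil {
		panic(err)
	}

	// Subscribe returns a server stream of Envelopes matching the filters.
	stream, err := client.Subscribe(ctx, &events.SubscribeRequest{Filters: []string{"topic==/demo/topic"}})
	if err != nil {
		panic(err)
	}
	env, err := stream.Recv()
	if err != nil {
		panic(err)
	}
	fmt.Println(env.Topic)
}
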
902	vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go  (generated, vendored, new file)
@@ -0,0 +1,902 @@
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/image.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type ImageCreate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageCreate) Reset() { *m = ImageCreate{} }
|
||||||
|
func (*ImageCreate) ProtoMessage() {}
|
||||||
|
func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
|
||||||
|
|
||||||
|
type ImageUpdate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Reset() { *m = ImageUpdate{} }
|
||||||
|
func (*ImageUpdate) ProtoMessage() {}
|
||||||
|
func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
|
||||||
|
|
||||||
|
type ImageDelete struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Reset() { *m = ImageDelete{} }
|
||||||
|
func (*ImageDelete) ProtoMessage() {}
|
||||||
|
func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate")
|
||||||
|
proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
|
||||||
|
proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
|
||||||
|
}
|
||||||
|
func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Image(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Image(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *ImageCreate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageUpdate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ImageDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovImage(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovImage(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozImage(x uint64) (n int) {
|
||||||
|
return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *ImageCreate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&ImageCreate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ImageUpdate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&ImageUpdate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ImageDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ImageDelete{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringImage(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *ImageCreate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ImageUpdate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ImageDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipImage(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipImage(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthImage
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowImage
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipImage(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowImage = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/image.proto", fileDescriptorImage)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorImage = []byte{
|
||||||
|
// 263 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea,
|
||||||
|
0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14,
|
||||||
|
0xeb, 0x95, 0x19, 0x2a, 0xad, 0x61, 0xe4, 0xe2, 0xf6, 0x04, 0xf1, 0x9c, 0x8b, 0x52, 0x13, 0x4b,
|
||||||
|
0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83,
|
||||||
|
0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66,
|
||||||
|
0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0x66, 0xea, 0x21, 0x99, 0xa7, 0xe7, 0x03, 0xd6, 0xe8, 0x9a,
|
||||||
|
0x57, 0x52, 0x54, 0x19, 0x04, 0x35, 0x45, 0xca, 0x92, 0x8b, 0x1b, 0x49, 0x58, 0x48, 0x80, 0x8b,
|
||||||
|
0x39, 0x3b, 0xb5, 0x12, 0x6a, 0x23, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a,
|
||||||
|
0x2a, 0xc1, 0x04, 0x16, 0x83, 0x70, 0xac, 0x98, 0x2c, 0x18, 0x11, 0xce, 0x0d, 0x2d, 0x48, 0xa1,
|
||||||
|
0xaa, 0x73, 0x21, 0xe6, 0x51, 0xdb, 0xb9, 0x8a, 0x50, 0xd7, 0xba, 0xa4, 0xe6, 0xa4, 0x62, 0x77,
|
||||||
|
0xad, 0x53, 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c,
|
||||||
|
0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, 0xec, 0xc8, 0x8c,
|
||||||
|
0x7e, 0x6b, 0x08, 0x2b, 0x89, 0x0d, 0x9c, 0x00, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
|
||||||
|
0x7c, 0x2c, 0x4a, 0x47, 0x02, 0x00, 0x00,
|
||||||
|
}
|
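Aside (illustrative only, not part of the vendored file): the sovImage and encodeVarintImage helpers above implement standard protobuf varint encoding — 7 payload bits per byte, with the high bit set on every byte except the last. Below is a minimal standalone sketch of the same logic, using assumed local names sov and encodeVarint so it compiles outside the generated package.

package main

import "fmt"

// sov mirrors sovImage: the number of bytes x occupies as a varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encodeVarint mirrors encodeVarintImage: write x at offset, return the next free offset.
func encodeVarint(buf []byte, offset int, x uint64) int {
	for x >= 1<<7 {
		buf[offset] = uint8(x&0x7f | 0x80) // low 7 bits, continuation bit set
		x >>= 7
		offset++
	}
	buf[offset] = uint8(x) // final byte, continuation bit clear
	return offset + 1
}

func main() {
	buf := make([]byte, sov(300))
	encodeVarint(buf, 0, 300)
	fmt.Printf("%x\n", buf) // ac02 — 300 needs two varint bytes
}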
19
vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package containerd.services.images.v1;
|
||||||
|
|
||||||
|
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
|
||||||
|
|
||||||
|
message ImageCreate {
|
||||||
|
string name = 1;
|
||||||
|
map<string, string> labels = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ImageUpdate {
|
||||||
|
string name = 1;
|
||||||
|
map<string, string> labels = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ImageDelete {
|
||||||
|
string name = 1;
|
||||||
|
}
|
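Aside (illustrative only, assuming the vendored package is importable at the path named in its go_package option): the generated ImageCreate type above carries a name and a string-to-string label map, and round-trips through the Marshal/Unmarshal methods shown in image.pb.go. A minimal sketch with made-up example values:

package main

import (
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
)

func main() {
	in := events.ImageCreate{
		Name:   "docker.io/library/busybox:latest",
		Labels: map[string]string{"example-label": "example-value"},
	}

	// Marshal follows the generated MarshalTo path: field 1 (name), then one
	// length-delimited map entry per label under field 2.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out events.ImageCreate
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // &ImageCreate{Name:..., Labels:map[string]string{...},}
}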
904
vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
generated
vendored
Normal file
@ -0,0 +1,904 @@
|
|||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: github.com/containerd/containerd/api/services/events/v1/namespace.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package events
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
type NamespaceCreate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} }
|
||||||
|
func (*NamespaceCreate) ProtoMessage() {}
|
||||||
|
func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
|
||||||
|
|
||||||
|
type NamespaceUpdate struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} }
|
||||||
|
func (*NamespaceUpdate) ProtoMessage() {}
|
||||||
|
func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
|
||||||
|
|
||||||
|
type NamespaceDelete struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} }
|
||||||
|
func (*NamespaceDelete) ProtoMessage() {}
|
||||||
|
func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*NamespaceCreate)(nil), "containerd.services.events.v1.NamespaceCreate")
|
||||||
|
proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate")
|
||||||
|
proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete")
|
||||||
|
}
|
||||||
|
func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, _ := range m.Labels {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
v := m.Labels[k]
|
||||||
|
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
|
||||||
|
i += copy(dAtA[i:], k)
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
|
||||||
|
i += copy(dAtA[i:], v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *NamespaceCreate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceUpdate) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
if len(m.Labels) > 0 {
|
||||||
|
for k, v := range m.Labels {
|
||||||
|
_ = k
|
||||||
|
_ = v
|
||||||
|
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
|
||||||
|
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *NamespaceDelete) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovNamespace(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovNamespace(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozNamespace(x uint64) (n int) {
|
||||||
|
return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *NamespaceCreate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&NamespaceCreate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *NamespaceUpdate) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
keysForLabels := make([]string, 0, len(this.Labels))
|
||||||
|
for k, _ := range this.Labels {
|
||||||
|
keysForLabels = append(keysForLabels, k)
|
||||||
|
}
|
||||||
|
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
|
||||||
|
mapStringForLabels := "map[string]string{"
|
||||||
|
for _, k := range keysForLabels {
|
||||||
|
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
|
||||||
|
}
|
||||||
|
mapStringForLabels += "}"
|
||||||
|
s := strings.Join([]string{`&NamespaceUpdate{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`Labels:` + mapStringForLabels + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *NamespaceDelete) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&NamespaceDelete{`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringNamespace(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *NamespaceCreate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
var keykey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
keykey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapkey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapkey := int(stringLenmapkey)
|
||||||
|
if intStringLenmapkey < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||||
|
if postStringIndexmapkey > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||||
|
iNdEx = postStringIndexmapkey
|
||||||
|
if m.Labels == nil {
|
||||||
|
m.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if iNdEx < postIndex {
|
||||||
|
var valuekey uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
valuekey |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var stringLenmapvalue uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLenmapvalue := int(stringLenmapvalue)
|
||||||
|
if intStringLenmapvalue < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||||
|
if postStringIndexmapvalue > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||||
|
iNdEx = postStringIndexmapvalue
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
} else {
|
||||||
|
var mapvalue string
|
||||||
|
m.Labels[mapkey] = mapvalue
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *NamespaceDelete) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipNamespace(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipNamespace(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthNamespace
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowNamespace
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipNamespace(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/namespace.proto", fileDescriptorNamespace)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorNamespace = []byte{
|
||||||
|
// 277 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17,
|
||||||
|
0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94,
|
||||||
|
0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83,
|
||||||
|
0x58, 0x10, 0x4d, 0x4a, 0x5b, 0x18, 0xb9, 0xf8, 0xfd, 0x60, 0x06, 0x39, 0x17, 0xa5, 0x26, 0x96,
|
||||||
|
0xa4, 0x0a, 0x09, 0x71, 0xb1, 0x80, 0xcc, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3,
|
||||||
|
0x85, 0x82, 0xb8, 0xd8, 0x72, 0x12, 0x93, 0x52, 0x73, 0x8a, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8,
|
||||||
|
0x8d, 0xac, 0xf4, 0xf0, 0xda, 0xa6, 0x87, 0x66, 0xa6, 0x9e, 0x0f, 0x58, 0xb3, 0x6b, 0x5e, 0x49,
|
||||||
|
0x51, 0x65, 0x10, 0xd4, 0x24, 0x29, 0x4b, 0x2e, 0x6e, 0x24, 0x61, 0x21, 0x01, 0x2e, 0xe6, 0xec,
|
||||||
|
0xd4, 0x4a, 0xa8, 0xad, 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04,
|
||||||
|
0x13, 0x58, 0x0c, 0xc2, 0xb1, 0x62, 0xb2, 0x60, 0x44, 0x75, 0x76, 0x68, 0x41, 0x0a, 0xd5, 0x9d,
|
||||||
|
0x0d, 0x31, 0x93, 0xda, 0xce, 0x56, 0x45, 0x72, 0xb5, 0x4b, 0x6a, 0x4e, 0x2a, 0x76, 0x57, 0x3b,
|
||||||
|
0x45, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
|
||||||
|
0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x8e, 0xcc, 0xc4, 0x62,
|
||||||
|
0x0d, 0x61, 0x25, 0xb1, 0x81, 0x63, 0xdd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xf0, 0x68,
|
||||||
|
0xa6, 0x75, 0x02, 0x00, 0x00,
|
||||||
|
}
|
21
vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package containerd.services.events.v1;
|
||||||
|
|
||||||
|
import "gogoproto/gogo.proto";
|
||||||
|
|
||||||
|
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
|
||||||
|
|
||||||
|
message NamespaceCreate {
|
||||||
|
string name = 1;
|
||||||
|
map<string, string> labels = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message NamespaceUpdate {
|
||||||
|
string name = 1;
|
||||||
|
map<string, string> labels = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message NamespaceDelete {
|
||||||
|
string name = 1;
|
||||||
|
}
|
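Aside (illustrative only): each labels entry in the messages above is serialized by the generated code as a nested message under field 2 — key as entry field 1, value as entry field 2 — sized by the mapSize expression in namespace.pb.go. A minimal sketch comparing NamespaceCreate.Marshal against the hand-computed bytes for a single label (assumes the vendored package import path):

package main

import (
	"bytes"
	"fmt"

	events "github.com/containerd/containerd/api/services/events/v1"
)

func main() {
	nc := events.NamespaceCreate{
		Name:   "n",
		Labels: map[string]string{"k": "v"},
	}
	got, err := nc.Marshal()
	if err != nil {
		panic(err)
	}

	// Expected layout, following the generated MarshalTo:
	//   0x0a 0x01 'n'       field 1 (name), length 1
	//   0x12 0x06           field 2 (labels entry), mapSize = 1+1+1 + 1+1+1 = 6
	//     0x0a 0x01 'k'     entry key
	//     0x12 0x01 'v'     entry value
	want := []byte{0x0a, 0x01, 'n', 0x12, 0x06, 0x0a, 0x01, 'k', 0x12, 0x01, 'v'}
	fmt.Println(bytes.Equal(got, want)) // true
}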
674
vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go
generated
vendored
Normal file
@@ -0,0 +1,674 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/api/services/events/v1/snapshot.proto
// DO NOT EDIT!

package events

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

import strings "strings"
import reflect "reflect"

import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

type SnapshotPrepare struct {
	Key    string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
}

func (m *SnapshotPrepare) Reset()                    { *m = SnapshotPrepare{} }
func (*SnapshotPrepare) ProtoMessage()               {}
func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }

type SnapshotCommit struct {
	Key  string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}

func (m *SnapshotCommit) Reset()                    { *m = SnapshotCommit{} }
func (*SnapshotCommit) ProtoMessage()               {}
func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }

type SnapshotRemove struct {
	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
}

func (m *SnapshotRemove) Reset()                    { *m = SnapshotRemove{} }
func (*SnapshotRemove) ProtoMessage()               {}
func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }

func init() {
	proto.RegisterType((*SnapshotPrepare)(nil), "containerd.services.events.v1.SnapshotPrepare")
	proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit")
	proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove")
}
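The remainder of this generated file (below) supplies hand-rolled Marshal, MarshalTo, Size, String, and Unmarshal implementations for these types. A small sketch of exercising them from within the same events package; the values and the roundTrip helper are illustrative, not taken from this PR:

// roundTrip encodes a SnapshotPrepare with the generated Marshal
// (which uses Size and MarshalTo internally) and decodes it again.
func roundTrip() error {
	prepare := &SnapshotPrepare{Key: "sha256:abc", Parent: "sha256:base"}
	buf, err := prepare.Marshal()
	if err != nil {
		return err
	}
	var decoded SnapshotPrepare
	return decoded.Unmarshal(buf)
}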
|
||||||
|
func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if len(m.Parent) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
|
||||||
|
i += copy(dAtA[i:], m.Parent)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
if len(m.Name) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
|
||||||
|
i += copy(dAtA[i:], m.Name)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Key) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i += copy(dAtA[i:], m.Key)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *SnapshotPrepare) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Parent)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotCommit) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Name)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SnapshotRemove) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSnapshot(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovSnapshot(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozSnapshot(x uint64) (n int) {
|
||||||
|
return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *SnapshotPrepare) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotPrepare{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *SnapshotCommit) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotCommit{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *SnapshotRemove) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SnapshotRemove{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringSnapshot(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Parent = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *SnapshotCommit) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Name = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *SnapshotRemove) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSnapshot(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipSnapshot(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthSnapshot
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSnapshot
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipSnapshot(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/snapshot.proto", fileDescriptorSnapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptorSnapshot = []byte{
|
||||||
|
// 219 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
|
0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67,
|
||||||
|
0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb,
|
||||||
|
0x41, 0x54, 0xeb, 0x95, 0x19, 0x2a, 0x59, 0x73, 0xf1, 0x07, 0x43, 0x35, 0x04, 0x14, 0xa5, 0x16,
|
||||||
|
0x24, 0x16, 0xa5, 0x0a, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70,
|
||||||
|
0x06, 0x81, 0x98, 0x42, 0x62, 0x5c, 0x6c, 0x20, 0x99, 0xbc, 0x12, 0x09, 0x26, 0xb0, 0x20, 0x94,
|
||||||
|
0xa7, 0x64, 0xc6, 0xc5, 0x07, 0xd3, 0xec, 0x9c, 0x9f, 0x9b, 0x9b, 0x59, 0x82, 0x45, 0xaf, 0x10,
|
||||||
|
0x17, 0x4b, 0x5e, 0x62, 0x6e, 0x2a, 0x54, 0x27, 0x98, 0xad, 0xa4, 0x84, 0xd0, 0x17, 0x94, 0x9a,
|
||||||
|
0x9b, 0x5f, 0x86, 0xc5, 0x4e, 0xa7, 0x88, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68,
|
||||||
|
0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
|
||||||
|
0x46, 0xd9, 0x91, 0x19, 0x32, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0x38, 0x60, 0x8c, 0x01, 0x01, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x10, 0x4c, 0x3d, 0xb2, 0x62, 0x01, 0x00, 0x00,
|
||||||
|
}
|
19 vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto generated vendored Normal file
@@ -0,0 +1,19 @@
syntax = "proto3";

package containerd.services.events.v1;

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message SnapshotPrepare {
	string key = 1;
	string parent = 2;
}

message SnapshotCommit {
	string key = 1;
	string name = 2;
}

message SnapshotRemove {
	string key = 1;
}
2257 vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go generated vendored Normal file
File diff suppressed because it is too large
68 vendor/github.com/containerd/containerd/api/services/events/v1/task.proto generated vendored Normal file
@@ -0,0 +1,68 @@
syntax = "proto3";

package containerd.services.events.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

message TaskCreate {
	string container_id = 1;
	string bundle = 2;
	repeated containerd.types.Mount rootfs = 3;
	TaskIO io = 4 [(gogoproto.customname) = "IO"];
	string checkpoint = 5;
	uint32 pid = 6;
}

message TaskStart {
	string container_id = 1;
	uint32 pid = 2;
}

message TaskDelete {
	string container_id = 1;
	uint32 pid = 2;
	uint32 exit_status = 3;
	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message TaskIO {
	string stdin = 1;
	string stdout = 2;
	string stderr = 3;
	bool terminal = 4;
}

message TaskExit {
	string container_id = 1;
	string id = 2;
	uint32 pid = 3;
	uint32 exit_status = 4;
	google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message TaskOOM {
	string container_id = 1;
}

message TaskExecAdded {
	string container_id = 1;
	string exec_id = 2;
	uint32 pid = 3;
}

message TaskPaused {
	string container_id = 1;
}

message TaskResumed {
	string container_id = 1;
}

message TaskCheckpointed {
	string container_id = 1;
	string checkpoint = 2;
}
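For orientation only (not part of the diff): a sketch of what constructing and encoding a TaskExit event might look like on the publishing side. The generated Go field names (ContainerID, ExitStatus, ExitedAt) follow containerd's usual gogoproto conventions and are assumptions here, as are the values; note that (gogoproto.stdtime) = true maps the Timestamp field to a native time.Time.

// Sketch under the assumptions stated above.
package main

import (
	"fmt"
	"time"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/gogo/protobuf/proto"
)

func main() {
	exit := &events.TaskExit{
		ContainerID: "redis",
		ID:          "redis",
		Pid:         1234,
		ExitStatus:  0,
		ExitedAt:    time.Now(), // stdtime=true: plain time.Time, not a *Timestamp
	}
	data, err := proto.Marshal(exit)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(data))
}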
176 vendor/github.com/containerd/containerd/api/services/execution/execution.proto generated vendored
@ -1,176 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.v1.services.execution;
|
|
||||||
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/any.proto";
|
|
||||||
import "gogoproto/gogo.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/task/task.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
|
|
||||||
service Tasks {
|
|
||||||
rpc Create(CreateRequest) returns (CreateResponse);
|
|
||||||
rpc Start(StartRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Delete(DeleteRequest) returns (DeleteResponse);
|
|
||||||
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
|
|
||||||
rpc Info(InfoRequest) returns (InfoResponse);
|
|
||||||
rpc List(ListRequest) returns (ListResponse);
|
|
||||||
rpc Kill(KillRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Events(EventsRequest) returns (stream containerd.v1.types.Event);
|
|
||||||
rpc Exec(ExecRequest) returns (ExecResponse);
|
|
||||||
rpc Pty(PtyRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Pause(PauseRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Processes(ProcessesRequest) returns (ProcessesResponse);
|
|
||||||
rpc Checkpoint(CheckpointRequest) returns (CheckpointResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateRequest {
|
|
||||||
// ContainerID specifies the container to use for creating this task.
|
|
||||||
//
|
|
||||||
// The spec from the provided container id will be used to create the
|
|
||||||
// task associated with this container. Only one task can be run at a time
|
|
||||||
// per container.
|
|
||||||
//
|
|
||||||
// This should be created using the Containers service.
|
|
||||||
string container_id = 2;
|
|
||||||
|
|
||||||
// RootFS provides the pre-chroot mounts to perform in the shim before
|
|
||||||
// executing the container task.
|
|
||||||
//
|
|
||||||
// These are for mounts that cannot be performed in the user namespace.
|
|
||||||
// Typically, these mounts should be resolved from snapshots specified on
|
|
||||||
// the container object.
|
|
||||||
repeated containerd.v1.types.Mount rootfs = 3;
|
|
||||||
|
|
||||||
string stdin = 5;
|
|
||||||
string stdout = 6;
|
|
||||||
string stderr = 7;
|
|
||||||
bool terminal = 8;
|
|
||||||
|
|
||||||
types.Descriptor checkpoint = 9;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateResponse {
|
|
||||||
// TODO(stevvooe): We no longer have an id for a task since they are bound
|
|
||||||
// to a single container. Although, we should represent each new task with
|
|
||||||
// an ID so one can differentiate between each instance of a container
|
|
||||||
// running.
|
|
||||||
//
|
|
||||||
// Hence, we are leaving this here and reserving the field number in case
|
|
||||||
// we need to move in this direction.
|
|
||||||
// string id = 1;
|
|
||||||
|
|
||||||
string container_id = 2;
|
|
||||||
uint32 pid = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StartRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteResponse {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 exit_status = 2;
|
|
||||||
google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteProcessRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoResponse {
|
|
||||||
types.Task task = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListRequest {
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListResponse {
|
|
||||||
repeated containerd.v1.types.Task tasks = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message KillRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 signal = 2;
|
|
||||||
oneof pid_or_all {
|
|
||||||
bool all = 3;
|
|
||||||
uint32 pid = 4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message EventsRequest {
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecRequest {
|
|
||||||
// ContainerID specifies the container in which to exec the process.
|
|
||||||
string container_id = 1;
|
|
||||||
bool terminal = 2;
|
|
||||||
string stdin = 3;
|
|
||||||
string stdout = 4;
|
|
||||||
string stderr = 5;
|
|
||||||
|
|
||||||
// Spec for starting a process in the target container.
|
|
||||||
//
|
|
||||||
// For runc, this is a process spec, for example.
|
|
||||||
google.protobuf.Any spec = 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecResponse {
|
|
||||||
uint32 pid = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PtyRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
uint32 width = 3;
|
|
||||||
uint32 height = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CloseStdinRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PauseRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ResumeRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ProcessesRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ProcessesResponse{
|
|
||||||
repeated containerd.v1.types.Process processes = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
bool allow_tcp = 2;
|
|
||||||
bool allow_unix_sockets = 3;
|
|
||||||
bool allow_terminal = 4;
|
|
||||||
bool file_locks = 5;
|
|
||||||
repeated string empty_namespaces = 6;
|
|
||||||
string parent_checkpoint = 7 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
bool exit = 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointResponse {
|
|
||||||
repeated types.Descriptor descriptors = 1;
|
|
||||||
}
|
|
1406 vendor/github.com/containerd/containerd/api/services/images/images.pb.go generated vendored
File diff suppressed because it is too large
78 vendor/github.com/containerd/containerd/api/services/images/images.proto generated vendored
@ -1,78 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.v1;
|
|
||||||
|
|
||||||
import "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
|
|
||||||
|
|
||||||
// Images is a service that allows one to register images with containerd.
|
|
||||||
//
|
|
||||||
// In containerd, an image is merely the mapping of a name to a content root,
|
|
||||||
// described by a descriptor. The behavior and state of image is purely
|
|
||||||
// dictated by the type of the descriptor.
|
|
||||||
//
|
|
||||||
// From the perspective of this service, these references are mostly shallow,
|
|
||||||
// in that the existence of the required content won't be validated until
|
|
||||||
// required by consuming services.
|
|
||||||
//
|
|
||||||
// As such, this can really be considered a "metadata service".
|
|
||||||
service Images {
|
|
||||||
// Get returns an image by name.
|
|
||||||
rpc Get(GetRequest) returns (GetResponse);
|
|
||||||
|
|
||||||
// List returns a list of all images known to containerd.
|
|
||||||
rpc List(ListRequest) returns (ListResponse);
|
|
||||||
|
|
||||||
// Put assigns the name to a given target image based on the provided
|
|
||||||
// image.
|
|
||||||
rpc Put(PutRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
// Delete deletes the image by name.
|
|
||||||
rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Image {
|
|
||||||
string name = 1;
|
|
||||||
string labels = 2;
|
|
||||||
types.Descriptor target = 3 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetRequest {
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// TODO(stevvooe): Consider that we may want to have multiple images under
|
|
||||||
// the same name or multiple names for the same image. This mapping could
|
|
||||||
// be truly many to many but we'll need a way to identify an entry.
|
|
||||||
//
|
|
||||||
// For now, we consider it unique but an intermediary index could be
|
|
||||||
// created to allow for a dispatch of images.
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetResponse {
|
|
||||||
Image image = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PutRequest {
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListRequest {
|
|
||||||
// TODO(stevvooe): empty for now, need to ad filtration
|
|
||||||
// Some common use cases we might consider:
|
|
||||||
//
|
|
||||||
// 1. Select by multiple names.
|
|
||||||
// 2. Select by platform.
|
|
||||||
// 3. Select by annotations.
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListResponse {
|
|
||||||
repeated Image images = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// TODO(stevvooe): Add pagination.
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteRequest {
|
|
||||||
string name = 1;
|
|
||||||
}
|
|
2190 vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go generated vendored Normal file
File diff suppressed because it is too large
117 vendor/github.com/containerd/containerd/api/services/images/v1/images.proto generated vendored Normal file
@@ -0,0 +1,117 @@
syntax = "proto3";

package containerd.services.images.v1;

import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";

option go_package = "github.com/containerd/containerd/api/services/images/v1;images";

// Images is a service that allows one to register images with containerd.
//
// In containerd, an image is merely the mapping of a name to a content root,
// described by a descriptor. The behavior and state of image is purely
// dictated by the type of the descriptor.
//
// From the perspective of this service, these references are mostly shallow,
// in that the existence of the required content won't be validated until
// required by consuming services.
//
// As such, this can really be considered a "metadata service".
service Images {
	// Get returns an image by name.
	rpc Get(GetImageRequest) returns (GetImageResponse);

	// List returns a list of all images known to containerd.
	rpc List(ListImagesRequest) returns (ListImagesResponse);

	// Create an image record in the metadata store.
	//
	// The name of the image must be unique.
	rpc Create(CreateImageRequest) returns (CreateImageResponse);

	// Update assigns the name to a given target image based on the provided
	// image.
	rpc Update(UpdateImageRequest) returns (UpdateImageResponse);

	// Delete deletes the image by name.
	rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
}

message Image {
	// Name provides a unique name for the image.
	//
	// Containerd treats this as the primary identifier.
	string name = 1;

	// Labels provides free form labels for the image. These are runtime only
	// and do not get inherited into the package image in any way.
	//
	// Labels may be updated using the field mask.
	map<string, string> labels = 2;

	// Target describes the content entry point of the image.
	containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];

	// CreatedAt is the time the image was first created.
	google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

	// UpdatedAt is the last time the image was mutated.
	google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message GetImageRequest {
	string name = 1;
}

message GetImageResponse {
	Image image = 1;
}

message CreateImageRequest {
	Image image = 1 [(gogoproto.nullable) = false];
}

message CreateImageResponse {
	Image image = 1 [(gogoproto.nullable) = false];
}

message UpdateImageRequest {
	// Image provides a full or partial image for update.
	//
	// The name field must be set or an error will be returned.
	Image image = 1 [(gogoproto.nullable) = false];

	// UpdateMask specifies which fields to perform the update on. If empty,
	// the operation applies to all fields.
	google.protobuf.FieldMask update_mask = 2;
}

message UpdateImageResponse {
	Image image = 1 [(gogoproto.nullable) = false];
}

message ListImagesRequest {
	// Filters contains one or more filters using the syntax defined in the
	// containerd filter package.
	//
	// The returned result will be those that match any of the provided
	// filters. Expanded, images that match the following will be
	// returned:
	//
	// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
	//
	// If filters is zero-length or nil, all items will be returned.
	repeated string filters = 1;
}

message ListImagesResponse {
	repeated Image images = 1 [(gogoproto.nullable) = false];
}

message DeleteImageRequest {
	string name = 1;
}
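For orientation only (not part of the diff): a sketch of how a caller might use this new Images service over gRPC. The NewImagesClient constructor, the Go field names, and the FieldMask mapping to github.com/gogo/protobuf/types are assumptions based on the usual protoc-gen-gogo output, since the images.pb.go diff itself is suppressed above; the dial target and image name are placeholders.

// Sketch under the assumptions stated above.
package main

import (
	"context"
	"fmt"

	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	"github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder dial target; containerd normally listens on a unix socket
	// and callers supply an appropriate dialer.
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := imagesapi.NewImagesClient(conn)
	ctx := context.Background()

	// Fetch an image record by name.
	resp, err := client.Get(ctx, &imagesapi.GetImageRequest{Name: "docker.io/library/redis:latest"})
	if err != nil {
		panic(err)
	}

	// Update only the labels, scoping the change with the field mask.
	img := resp.Image
	img.Labels = map[string]string{"pinned": "true"}
	_, err = client.Update(ctx, &imagesapi.UpdateImageRequest{
		Image:      *img, // UpdateImageRequest.Image is non-nullable, so pass by value
		UpdateMask: &types.FieldMask{Paths: []string{"labels"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("updated", img.Name)
}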
@ -1,12 +1,12 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/services/namespaces/namespace.proto
|
// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package namespaces is a generated protocol buffer package.
|
Package namespaces is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/services/namespaces/namespace.proto
|
github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Namespace
|
Namespace
|
||||||
@ -152,16 +152,16 @@ func (*DeleteNamespaceRequest) ProtoMessage() {}
|
|||||||
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
|
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Namespace)(nil), "containerd.v1.namespaces.Namespace")
|
proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace")
|
||||||
proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.v1.namespaces.GetNamespaceRequest")
|
proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest")
|
||||||
proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.v1.namespaces.GetNamespaceResponse")
|
proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse")
|
||||||
proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.v1.namespaces.ListNamespacesRequest")
|
proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest")
|
||||||
proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.v1.namespaces.ListNamespacesResponse")
|
proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.services.namespaces.v1.ListNamespacesResponse")
|
||||||
proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.v1.namespaces.CreateNamespaceRequest")
|
proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.services.namespaces.v1.CreateNamespaceRequest")
|
||||||
proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.v1.namespaces.CreateNamespaceResponse")
|
proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.services.namespaces.v1.CreateNamespaceResponse")
|
||||||
proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.v1.namespaces.UpdateNamespaceRequest")
|
proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.services.namespaces.v1.UpdateNamespaceRequest")
|
||||||
proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.v1.namespaces.UpdateNamespaceResponse")
|
proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.services.namespaces.v1.UpdateNamespaceResponse")
|
||||||
proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.v1.namespaces.DeleteNamespaceRequest")
|
proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -192,7 +192,7 @@ func NewNamespacesClient(cc *grpc.ClientConn) NamespacesClient {
|
|||||||
|
|
||||||
func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
||||||
out := new(GetNamespaceResponse)
|
out := new(GetNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Get", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -201,7 +201,7 @@ func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opt
|
|||||||
|
|
||||||
func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
||||||
out := new(ListNamespacesResponse)
|
out := new(ListNamespacesResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/List", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -210,7 +210,7 @@ func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest,
|
|||||||
|
|
||||||
func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
||||||
out := new(CreateNamespaceResponse)
|
out := new(CreateNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Create", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -219,7 +219,7 @@ func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceReques
|
|||||||
|
|
||||||
func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
|
func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
|
||||||
out := new(UpdateNamespaceResponse)
|
out := new(UpdateNamespaceResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Update", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -228,7 +228,7 @@ func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceReques
|
|||||||
|
|
||||||
func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
|
func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
|
||||||
out := new(google_protobuf1.Empty)
|
out := new(google_protobuf1.Empty)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.namespaces.Namespaces/Delete", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -259,7 +259,7 @@ func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(inte
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/Get",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest))
|
return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest))
|
||||||
@ -277,7 +277,7 @@ func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/List",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/List",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest))
|
return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest))
|
||||||
@ -295,7 +295,7 @@ func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/Create",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest))
|
return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest))
|
||||||
@ -313,7 +313,7 @@ func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/Update",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest))
|
return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest))
|
||||||
@ -331,7 +331,7 @@ func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.namespaces.Namespaces/Delete",
|
FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest))
|
return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest))
|
||||||
@ -340,7 +340,7 @@ func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Namespaces_serviceDesc = grpc.ServiceDesc{
|
var _Namespaces_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.namespaces.Namespaces",
|
ServiceName: "containerd.services.namespaces.v1.Namespaces",
|
||||||
HandlerType: (*NamespacesServer)(nil),
|
HandlerType: (*NamespacesServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -365,7 +365,7 @@ var _Namespaces_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/namespaces/namespace.proto",
|
Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Namespace) Marshal() (dAtA []byte, err error) {
|
func (m *Namespace) Marshal() (dAtA []byte, err error) {
|
||||||
@ -1967,42 +1967,44 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/namespace.proto", fileDescriptorNamespace)
|
proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptorNamespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorNamespace = []byte{
|
var fileDescriptorNamespace = []byte{
|
||||||
// 528 bytes of a gzipped FileDescriptorProto
|
// 547 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xbd, 0x8e, 0xd3, 0x4c,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
|
||||||
0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0xba, 0xf9, 0x34, 0x04, 0x63, 0x19, 0xc9, 0x44, 0xa6, 0x59,
|
0x10, 0xcd, 0x26, 0xc1, 0x52, 0xc6, 0x17, 0xb4, 0x04, 0x13, 0x19, 0xc9, 0x04, 0x9f, 0x8a, 0x54,
|
||||||
0x24, 0x18, 0xb3, 0xa1, 0xe1, 0xa7, 0x5b, 0x58, 0x02, 0xd2, 0x42, 0x61, 0x89, 0x7a, 0x35, 0x4e,
|
0xad, 0xd5, 0x20, 0x41, 0x3f, 0x6e, 0x85, 0xb6, 0x07, 0x0a, 0x42, 0x96, 0xb8, 0xc0, 0x01, 0x9c,
|
||||||
0x26, 0xc6, 0x8a, 0xff, 0xf0, 0x8c, 0x23, 0xa5, 0xe3, 0x0d, 0x78, 0x03, 0x1a, 0x5e, 0x26, 0x25,
|
0x64, 0xe3, 0x9a, 0x38, 0xb6, 0xf1, 0xae, 0x2d, 0x45, 0x1c, 0xe0, 0xdf, 0x70, 0xe1, 0x87, 0xe4,
|
||||||
0x25, 0x15, 0x62, 0xf3, 0x24, 0xc8, 0x63, 0x27, 0xce, 0x6e, 0x1c, 0x2b, 0x2b, 0x85, 0xee, 0x8e,
|
0xc8, 0x91, 0x13, 0x6a, 0xf3, 0x4b, 0xd0, 0xae, 0x9d, 0x38, 0x34, 0x46, 0xb8, 0x81, 0x72, 0x9b,
|
||||||
0x7d, 0x8e, 0xcf, 0xb9, 0xd7, 0xe7, 0x0e, 0xbc, 0xf3, 0x7c, 0xf1, 0x39, 0x73, 0xc9, 0x38, 0x0e,
|
0xb1, 0xf7, 0xcd, 0x7b, 0x3b, 0x7a, 0xcf, 0x86, 0x67, 0xae, 0xc7, 0xcf, 0x92, 0x3e, 0x19, 0x84,
|
||||||
0xed, 0x71, 0x1c, 0x09, 0xea, 0x47, 0x2c, 0x9d, 0x6c, 0x97, 0x34, 0xf1, 0x6d, 0xce, 0xd2, 0xb9,
|
0x13, 0x6b, 0x10, 0x06, 0xdc, 0xf1, 0x02, 0x1a, 0x0f, 0x57, 0x4b, 0x27, 0xf2, 0x2c, 0x46, 0xe3,
|
||||||
0x3f, 0x66, 0xdc, 0x8e, 0x68, 0xc8, 0x78, 0x42, 0xaf, 0x95, 0x24, 0x49, 0x63, 0x11, 0x63, 0xbd,
|
0xd4, 0x1b, 0x50, 0x66, 0x05, 0xce, 0x84, 0xb2, 0xc8, 0x11, 0x65, 0xba, 0x53, 0x74, 0x24, 0x8a,
|
||||||
0xe2, 0x90, 0xf9, 0x29, 0xa9, 0x90, 0x46, 0xdf, 0x8b, 0xbd, 0x58, 0x82, 0xec, 0xbc, 0x2a, 0xf0,
|
0x43, 0x1e, 0xe2, 0xfb, 0x05, 0x8c, 0x2c, 0x20, 0xa4, 0x80, 0x90, 0x74, 0x47, 0x6f, 0xbb, 0xa1,
|
||||||
0xc6, 0x7d, 0x2f, 0x8e, 0xbd, 0x80, 0xd9, 0xf2, 0xe4, 0x66, 0x53, 0x9b, 0x85, 0x89, 0x58, 0x94,
|
0x1b, 0xca, 0xd3, 0x96, 0xa8, 0x32, 0xa0, 0x7e, 0xd7, 0x0d, 0x43, 0xd7, 0xa7, 0x96, 0xec, 0xfa,
|
||||||
0x2f, 0x07, 0x37, 0x5f, 0x4e, 0x7d, 0x16, 0x4c, 0x2e, 0x43, 0xca, 0x67, 0x05, 0xc2, 0xfa, 0x81,
|
0xc9, 0xc8, 0xa2, 0x93, 0x88, 0x4f, 0xf3, 0x97, 0xdd, 0xcb, 0x2f, 0x47, 0x1e, 0xf5, 0x87, 0x6f,
|
||||||
0xa0, 0xf7, 0x71, 0xad, 0x81, 0x31, 0x74, 0x73, 0x41, 0x1d, 0x0d, 0xd0, 0x49, 0xcf, 0x91, 0x35,
|
0x27, 0x0e, 0x1b, 0x67, 0x27, 0xcc, 0xaf, 0x08, 0x5a, 0x2f, 0x16, 0x34, 0x18, 0x43, 0x53, 0x70,
|
||||||
0x1e, 0x81, 0x12, 0x50, 0x97, 0x05, 0x5c, 0x6f, 0x0f, 0x3a, 0x27, 0xea, 0xd0, 0x26, 0xfb, 0x1c,
|
0x76, 0x50, 0x17, 0x6d, 0xb5, 0x6c, 0x59, 0xe3, 0x97, 0xa0, 0xf8, 0x4e, 0x9f, 0xfa, 0xac, 0x53,
|
||||||
0x92, 0xcd, 0x87, 0xc8, 0x85, 0x64, 0x9c, 0x47, 0x22, 0x5d, 0x38, 0x25, 0xdd, 0x78, 0x01, 0xea,
|
0xef, 0x36, 0xb6, 0xd4, 0xde, 0x2e, 0xf9, 0xa3, 0x54, 0xb2, 0x9c, 0x48, 0x4e, 0x25, 0xf4, 0x28,
|
||||||
0xd6, 0x63, 0xfc, 0x3f, 0x74, 0x66, 0x6c, 0x51, 0x4a, 0xe5, 0x25, 0xee, 0xc3, 0x7f, 0x73, 0x1a,
|
0xe0, 0xf1, 0xd4, 0xce, 0xe7, 0xe8, 0x7b, 0xa0, 0xae, 0x3c, 0xc6, 0x37, 0xa1, 0x31, 0xa6, 0xd3,
|
||||||
0x64, 0x4c, 0x6f, 0xcb, 0x67, 0xc5, 0xe1, 0x65, 0xfb, 0x39, 0xb2, 0x1e, 0xc1, 0x9d, 0x11, 0x13,
|
0x9c, 0x53, 0x94, 0xb8, 0x0d, 0x37, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, 0xcd, 0x7e,
|
||||||
0x9b, 0xcf, 0x3b, 0xec, 0x4b, 0xc6, 0xb8, 0xa8, 0xb3, 0x6b, 0x5d, 0x42, 0xff, 0x3a, 0x94, 0x27,
|
0x7d, 0x17, 0x99, 0x0f, 0xe0, 0xd6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0xfd, 0x90, 0x50, 0xc6, 0xcb,
|
||||||
0x71, 0xc4, 0xf3, 0x36, 0x7a, 0x1b, 0xa7, 0x92, 0xa0, 0x0e, 0x1f, 0x1e, 0xd0, 0xc9, 0x59, 0x77,
|
0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf5, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, 0x62, 0x25,
|
||||||
0xf9, 0xfb, 0x41, 0xcb, 0xa9, 0xb8, 0x96, 0x0d, 0x77, 0x2f, 0x7c, 0x5e, 0x29, 0xf0, 0xb5, 0x1b,
|
0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0x8f, 0x7b, 0x35, 0xbb, 0x18, 0x62, 0x5a,
|
||||||
0x0d, 0x94, 0xa9, 0x1f, 0x08, 0x96, 0x96, 0x7e, 0xca, 0x93, 0x35, 0x06, 0xed, 0x26, 0xa1, 0xf4,
|
0x70, 0xfb, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, 0x71, 0x2e,
|
||||||
0xf4, 0x1e, 0xa0, 0xd2, 0xd4, 0x91, 0x1c, 0xef, 0x2d, 0x4c, 0x6d, 0x91, 0x2d, 0x0a, 0xda, 0xeb,
|
0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, 0x5c, 0xf8,
|
||||||
0x94, 0x51, 0xc1, 0x76, 0x86, 0x74, 0xb4, 0xc6, 0x5d, 0xb8, 0xb7, 0x23, 0x71, 0xec, 0xe1, 0x7e,
|
0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x41, 0x7b, 0x12, 0x53, 0x87, 0xd3, 0xb5, 0xb5, 0xfd, 0xfb,
|
||||||
0x47, 0xa0, 0x7d, 0x4a, 0x26, 0xff, 0xb2, 0x0f, 0xfc, 0x0a, 0xd4, 0x4c, 0x4a, 0xc8, 0x3d, 0x90,
|
0x55, 0x8c, 0xe1, 0xce, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x0b, 0x02, 0xed, 0x55, 0x34, 0xfc, 0x2f,
|
||||||
0x61, 0x53, 0x87, 0x06, 0x29, 0x56, 0x85, 0xac, 0x57, 0x85, 0xbc, 0xcd, 0x57, 0xe5, 0x03, 0xe5,
|
0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, 0x0b, 0x18,
|
||||||
0x33, 0x07, 0x0a, 0x78, 0x5e, 0xe7, 0x43, 0xd8, 0xf1, 0x77, 0xec, 0x21, 0x3c, 0x06, 0xed, 0x0d,
|
0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x77, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, 0xc5, 0x5a,
|
||||||
0x0b, 0x58, 0xcd, 0x0c, 0x6a, 0x02, 0x3f, 0xfc, 0xd6, 0x05, 0xa8, 0xb2, 0x85, 0x27, 0xd0, 0x19,
|
0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4a, 0x7d, 0x5a, 0xb2, 0x95, 0x92, 0x98, 0xf4,
|
||||||
0x31, 0x81, 0x9f, 0xec, 0x57, 0xae, 0xd9, 0x24, 0x83, 0x1c, 0x0a, 0x2f, 0x7b, 0xf5, 0xa1, 0x9b,
|
0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0xa3, 0x0a, 0x12, 0x4a, 0x82,
|
||||||
0x67, 0x1a, 0x37, 0x5c, 0x06, 0xb5, 0x4b, 0x62, 0x3c, 0x3d, 0x9c, 0x50, 0x4a, 0x85, 0xa0, 0x14,
|
0xa8, 0x3f, 0xbe, 0x32, 0x2e, 0x5f, 0xc3, 0x47, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, 0x97, 0xd2,
|
||||||
0xb1, 0xc3, 0x0d, 0xdc, 0xfa, 0xec, 0x1b, 0xa7, 0xb7, 0x60, 0x54, 0x72, 0xc5, 0x0f, 0x6e, 0x92,
|
0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x4f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, 0x94, 0x87,
|
||||||
0xab, 0x8f, 0x68, 0x93, 0xdc, 0xbe, 0xd0, 0x38, 0xa0, 0x14, 0xff, 0xba, 0x49, 0xae, 0x3e, 0x0d,
|
0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, 0x09, 0xf8,
|
||||||
0x86, 0xb6, 0x93, 0xd9, 0xf3, 0xfc, 0xee, 0x3f, 0xd3, 0x97, 0x57, 0x66, 0xeb, 0xd7, 0x95, 0xd9,
|
0x9d, 0x0b, 0xdf, 0x80, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, 0x1a, 0x8e,
|
||||||
0xfa, 0xba, 0x32, 0xd1, 0x72, 0x65, 0xa2, 0x9f, 0x2b, 0x13, 0xfd, 0x59, 0x99, 0xc8, 0x55, 0x24,
|
0xc4, 0xbf, 0xe8, 0xf0, 0xdd, 0xec, 0xc2, 0xa8, 0x7d, 0xbf, 0x30, 0x6a, 0x9f, 0xe7, 0x06, 0x9a,
|
||||||
0xf2, 0xd9, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x2e, 0xc3, 0x29, 0xaf, 0x06, 0x00, 0x00,
|
0xcd, 0x0d, 0xf4, 0x6d, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1f, 0xff, 0xc5, 0x2f, 0xf4, 0xa0,
|
||||||
|
0xe8, 0xfa, 0x8a, 0x64, 0x7c, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xe8, 0x4d, 0xe1, 0x93,
|
||||||
|
0x07, 0x00, 0x00,
|
||||||
}
|
}
|
@ -1,11 +1,13 @@
 syntax = "proto3";
 
-package containerd.v1.namespaces;
+package containerd.services.namespaces.v1;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 
+option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";
+
 // Namespaces provides the ability to manipulate containerd namespaces.
 //
 // All objects in the system are required to be a member of a namespace. If a
@ -44,7 +46,7 @@ message GetNamespaceResponse {
 }
 
 message ListNamespacesRequest {
-	string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
+	string filter = 1;
 }
 
 message ListNamespacesResponse {
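The namespaces API moves from containerd.v1.namespaces to the versioned containerd.services.namespaces.v1 package and pins its Go import path via go_package. Below is a rough sketch, not confirmed by this hunk, of calling the regenerated client over an existing gRPC connection; the List RPC name is assumed from the ListNamespacesRequest/Response messages shown above.

// A rough sketch, not confirmed by this diff: assumes the Namespaces service
// exposes a List RPC over the messages shown above and that conn is an
// established *grpc.ClientConn to containerd.
package example

import (
	"context"
	"log"

	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
	"google.golang.org/grpc"
)

func listNamespaces(ctx context.Context, conn *grpc.ClientConn) {
	client := namespacesapi.NewNamespacesClient(conn)
	// The filter syntax is still undefined upstream, so leave it empty.
	resp, err := client.List(ctx, &namespacesapi.ListNamespacesRequest{Filter: ""})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("namespaces: %+v", resp)
}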
81	vendor/github.com/containerd/containerd/api/services/snapshot/snapshots.proto (generated, vendored)
@ -1,81 +0,0 @@
-syntax = "proto3";
-
-package containerd.v1.snapshot;
-
-import "gogoproto/gogo.proto";
-import "google/protobuf/empty.proto";
-import "github.com/containerd/containerd/api/types/mount/mount.proto";
-
-// Snapshot service manages snapshots
-service Snapshot {
-	rpc Prepare(PrepareRequest) returns (MountsResponse);
-	rpc View(PrepareRequest) returns (MountsResponse);
-	rpc Mounts(MountsRequest) returns (MountsResponse);
-	rpc Commit(CommitRequest) returns (google.protobuf.Empty);
-	rpc Remove(RemoveRequest) returns (google.protobuf.Empty);
-	rpc Stat(StatRequest) returns (StatResponse);
-	rpc List(ListRequest) returns (stream ListResponse);
-	rpc Usage(UsageRequest) returns (UsageResponse);
-	// "Snapshot" prepares a new set of mounts from existing name
-}
-
-message PrepareRequest {
-	string key = 1;
-	string parent = 2;
-}
-
-message MountsRequest {
-	string key = 1;
-}
-
-message MountsResponse {
-	repeated containerd.v1.types.Mount mounts = 1;
-}
-
-message RemoveRequest {
-	string key = 1;
-}
-
-message CommitRequest {
-	string name = 1;
-	string key = 2;
-}
-
-message StatRequest {
-	string key = 1;
-}
-
-enum Kind {
-	option (gogoproto.goproto_enum_prefix) = false;
-	option (gogoproto.enum_customname) = "Kind";
-
-	ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"];
-
-	COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"];
-}
-
-message Info {
-	string name = 1;
-	string parent = 2;
-	Kind kind = 3;
-	bool readonly = 4;
-}
-
-message StatResponse {
-	Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message ListRequest{}
-
-message ListResponse {
-	repeated Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message UsageRequest {
-	string key = 1;
-}
-
-message UsageResponse {
-	int64 inodes = 2;
-	int64 size = 1;
-}
File diff suppressed because it is too large

104	vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto (generated, vendored, new file)
@ -0,0 +1,104 @@
+syntax = "proto3";
+
+package containerd.services.snapshots.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/empty.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot";
+
+// Snapshot service manages snapshots
+service Snapshots {
+	rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
+	rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
+	rpc Mounts(MountsRequest) returns (MountsResponse);
+	rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
+	rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
+	rpc Usage(UsageRequest) returns (UsageResponse);
+}
+
+message PrepareSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+}
+
+message PrepareSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message ViewSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+}
+
+message ViewSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message MountsRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message MountsResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message RemoveSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message CommitSnapshotRequest {
+	string snapshotter = 1;
+	string name = 2;
+	string key = 3;
+}
+
+message StatSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+enum Kind {
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "Kind";
+
+	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
+	VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
+	ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
+	COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
+}
+
+message Info {
+	string name = 1;
+	string parent = 2;
+	Kind kind = 3;
+}
+
+message StatSnapshotResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListSnapshotsRequest{
+	string snapshotter = 1;
+}
+
+message ListSnapshotsResponse {
+	repeated Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UsageRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message UsageResponse {
+	int64 size = 1;
+	int64 inodes = 2;
+}
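The snapshot service is now versioned as containerd.services.snapshots.v1, the service is renamed from Snapshot to Snapshots, and every request gains a snapshotter field so a single daemon can expose several snapshotter backends. A minimal sketch of preparing a snapshot directly against the regenerated client follows; it assumes an established *grpc.ClientConn, default gogo field naming (Snapshotter, Key, Parent, Mounts), and uses "overlayfs" only as an illustrative backend name.

// Hedged sketch: assumes an established *grpc.ClientConn, default gogo field
// naming for the messages above, and "overlayfs" as an illustrative backend.
package example

import (
	"context"
	"log"

	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
	"google.golang.org/grpc"
)

func prepareSnapshot(ctx context.Context, conn *grpc.ClientConn, parent string) {
	client := snapshotapi.NewSnapshotsClient(conn)
	resp, err := client.Prepare(ctx, &snapshotapi.PrepareSnapshotRequest{
		Snapshotter: "overlayfs",      // which backend to use; new in the v1 API
		Key:         "example-active", // key for the new active snapshot
		Parent:      parent,           // parent chain ID, "" for a fresh snapshot
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries the mounts needed to access the prepared snapshot.
	for _, m := range resp.Mounts {
		log.Printf("mount: %+v", m)
	}
}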
File diff suppressed because it is too large

180	vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto (generated, vendored, new file)
@ -0,0 +1,180 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package containerd.services.tasks.v1;
|
||||||
|
|
||||||
|
import "google/protobuf/empty.proto";
|
||||||
|
import "google/protobuf/any.proto";
|
||||||
|
import "gogoproto/gogo.proto";
|
||||||
|
import "github.com/containerd/containerd/api/types/mount.proto";
|
||||||
|
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
||||||
|
import "github.com/containerd/containerd/api/types/task/task.proto";
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
|
option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
|
||||||
|
|
||||||
|
service Tasks {
|
||||||
|
// Create a task.
|
||||||
|
rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
|
||||||
|
|
||||||
|
// Start a task.
|
||||||
|
rpc Start(StartTaskRequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
// Delete a task and on disk state.
|
||||||
|
rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
|
||||||
|
|
||||||
|
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
|
||||||
|
|
||||||
|
rpc Get(GetTaskRequest) returns (GetTaskResponse);
|
||||||
|
|
||||||
|
rpc List(ListTasksRequest) returns (ListTasksResponse);
|
||||||
|
|
||||||
|
// Kill a task or process.
|
||||||
|
rpc Kill(KillRequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
rpc Exec(ExecProcessRequest) returns (ExecProcessResponse);
|
||||||
|
|
||||||
|
rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
|
||||||
|
|
||||||
|
rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
|
||||||
|
|
||||||
|
rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
|
||||||
|
|
||||||
|
rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
|
||||||
|
}
|
||||||
|
|
||||||
|
message CreateTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
|
||||||
|
// RootFS provides the pre-chroot mounts to perform in the shim before
|
||||||
|
// executing the container task.
|
||||||
|
//
|
||||||
|
// These are for mounts that cannot be performed in the user namespace.
|
||||||
|
// Typically, these mounts should be resolved from snapshots specified on
|
||||||
|
// the container object.
|
||||||
|
repeated containerd.types.Mount rootfs = 3;
|
||||||
|
|
||||||
|
string stdin = 4;
|
||||||
|
string stdout = 5;
|
||||||
|
string stderr = 6;
|
||||||
|
bool terminal = 7;
|
||||||
|
|
||||||
|
containerd.types.Descriptor checkpoint = 8;
|
||||||
|
|
||||||
|
google.protobuf.Any options = 9;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CreateTaskResponse {
|
||||||
|
string container_id = 1;
|
||||||
|
uint32 pid = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message StartTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DeleteTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DeleteResponse {
|
||||||
|
string id = 1;
|
||||||
|
uint32 pid = 2;
|
||||||
|
uint32 exit_status = 3;
|
||||||
|
google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||||
|
}
|
||||||
|
|
||||||
|
message DeleteProcessRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string exec_id = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetTaskResponse {
|
||||||
|
containerd.v1.types.Task task = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListTasksRequest {
|
||||||
|
string filter = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListTasksResponse {
|
||||||
|
repeated containerd.v1.types.Task tasks = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message KillRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string exec_id = 2;
|
||||||
|
uint32 signal = 3;
|
||||||
|
bool all = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExecProcessRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string stdin = 2;
|
||||||
|
string stdout = 3;
|
||||||
|
string stderr = 4;
|
||||||
|
bool terminal = 5;
|
||||||
|
// Spec for starting a process in the target container.
|
||||||
|
//
|
||||||
|
// For runc, this is a process spec, for example.
|
||||||
|
google.protobuf.Any spec = 6;
|
||||||
|
// id of the exec process
|
||||||
|
string exec_id = 7;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExecProcessResponse {
|
||||||
|
uint32 pid = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResizePtyRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string exec_id = 2;
|
||||||
|
uint32 width = 3;
|
||||||
|
uint32 height = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CloseIORequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string exec_id = 2;
|
||||||
|
bool stdin = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PauseTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResumeTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListPidsRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListPidsResponse{
|
||||||
|
repeated uint32 pids = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CheckpointTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
|
google.protobuf.Any options = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CheckpointTaskResponse {
|
||||||
|
repeated containerd.types.Descriptor descriptors = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UpdateTaskRequest {
|
||||||
|
string container_id = 1;
|
||||||
|
google.protobuf.Any resources = 2;
|
||||||
|
}
|
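The old execution service is superseded by containerd.services.tasks.v1.Tasks, whose requests are keyed by container_id and whose Create call takes rootfs mounts, IO paths and an optional checkpoint descriptor. A hedged sketch of creating and starting a task through the generated client; the container ID and FIFO paths are placeholders, and the Go field spelling (ContainerID) is assumed from containerd's generation settings.

// Hedged sketch: assumes generated gogo bindings for the Tasks service, an
// established *grpc.ClientConn, and containerd's ContainerID field spelling.
// The container ID and FIFO paths are placeholders.
package example

import (
	"context"
	"log"

	"github.com/containerd/containerd/api/services/tasks/v1"
	"google.golang.org/grpc"
)

func createAndStartTask(ctx context.Context, conn *grpc.ClientConn) {
	client := tasks.NewTasksClient(conn)

	created, err := client.Create(ctx, &tasks.CreateTaskRequest{
		ContainerID: "example",            // the task is keyed by its container
		Stdin:       "/tmp/example-stdin", // FIFO paths for task IO
		Stdout:      "/tmp/example-stdout",
		Stderr:      "/tmp/example-stderr",
		Terminal:    false,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created task, pid", created.Pid)

	if _, err := client.Start(ctx, &tasks.StartTaskRequest{ContainerID: "example"}); err != nil {
		log.Fatal(err)
	}
}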
@ -1,12 +1,12 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/services/version/version.proto
|
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package version is a generated protocol buffer package.
|
Package version is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/services/version/version.proto
|
github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
VersionResponse
|
VersionResponse
|
||||||
@ -50,7 +50,7 @@ func (*VersionResponse) ProtoMessage() {}
|
|||||||
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
|
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*VersionResponse)(nil), "containerd.v1.VersionResponse")
|
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
@ -77,7 +77,7 @@ func NewVersionClient(cc *grpc.ClientConn) VersionClient {
|
|||||||
|
|
||||||
func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
||||||
out := new(VersionResponse)
|
out := new(VersionResponse)
|
||||||
err := grpc.Invoke(ctx, "/containerd.v1.Version/Version", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -104,7 +104,7 @@ func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
info := &grpc.UnaryServerInfo{
|
info := &grpc.UnaryServerInfo{
|
||||||
Server: srv,
|
Server: srv,
|
||||||
FullMethod: "/containerd.v1.Version/Version",
|
FullMethod: "/containerd.services.version.v1.Version/Version",
|
||||||
}
|
}
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
|
return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
|
||||||
@ -113,7 +113,7 @@ func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(int
|
|||||||
}
|
}
|
||||||
|
|
||||||
var _Version_serviceDesc = grpc.ServiceDesc{
|
var _Version_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "containerd.v1.Version",
|
ServiceName: "containerd.services.version.v1.Version",
|
||||||
HandlerType: (*VersionServer)(nil),
|
HandlerType: (*VersionServer)(nil),
|
||||||
Methods: []grpc.MethodDesc{
|
Methods: []grpc.MethodDesc{
|
||||||
{
|
{
|
||||||
@ -122,7 +122,7 @@ var _Version_serviceDesc = grpc.ServiceDesc{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{},
|
||||||
Metadata: "github.com/containerd/containerd/api/services/version/version.proto",
|
Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
||||||
@ -442,24 +442,25 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/services/version/version.proto", fileDescriptorVersion)
|
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorVersion = []byte{
|
var fileDescriptorVersion = []byte{
|
||||||
// 225 bytes of a gzipped FileDescriptorProto
|
// 241 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||||
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xc1, 0x68, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21,
|
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
|
||||||
0x5e, 0x84, 0x72, 0xbd, 0x32, 0x43, 0x29, 0xe9, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x7d, 0xb0,
|
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
|
||||||
0x64, 0x52, 0x69, 0x9a, 0x7e, 0x6a, 0x6e, 0x41, 0x49, 0x25, 0x44, 0xad, 0x94, 0x48, 0x7a, 0x7e,
|
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
|
||||||
0x7a, 0x3e, 0x98, 0xa9, 0x0f, 0x62, 0x41, 0x44, 0x95, 0xdc, 0xb9, 0xf8, 0xc3, 0x20, 0x46, 0x06,
|
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
|
||||||
0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x70, 0xb1, 0x43, 0x6d, 0x91, 0x60, 0x54,
|
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
|
||||||
0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x85, 0xa4, 0xb8, 0x38, 0x8a, 0x52, 0xcb, 0x32, 0xc1, 0x52,
|
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
|
||||||
0x4c, 0x60, 0x29, 0x38, 0xdf, 0xc8, 0x87, 0x8b, 0x1d, 0x6a, 0x90, 0x90, 0x23, 0x82, 0x29, 0xa6,
|
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
|
||||||
0x07, 0x71, 0x92, 0x1e, 0xcc, 0x49, 0x7a, 0xae, 0x20, 0x27, 0x49, 0xc9, 0xe9, 0xa1, 0xb8, 0x5c,
|
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
|
||||||
0x0f, 0xcd, 0x0d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48,
|
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
|
||||||
0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x4c, 0x62,
|
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
|
||||||
0x03, 0x9b, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x27, 0xa4, 0xd8, 0x40, 0x01, 0x00,
|
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1,
|
||||||
|
0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 0xc6, 0xa7, 0x01, 0x00,
|
||||||
0x00,
|
0x00,
|
||||||
}
|
}
|
@ -1,10 +1,13 @@
 syntax = "proto3";
 
-package containerd.v1;
+package containerd.services.version.v1;
 
 import "google/protobuf/empty.proto";
 import "gogoproto/gogo.proto";
 
+// TODO(stevvooe): Should version service actually be versioned?
+option go_package = "github.com/containerd/containerd/api/services/version/v1;version";
+
 service Version {
 	rpc Version(google.protobuf.Empty) returns (VersionResponse);
 }
@ -1,17 +1,19 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/types/descriptor/descriptor.proto
|
// source: github.com/containerd/containerd/api/types/descriptor.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package descriptor is a generated protocol buffer package.
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
It is generated from these files:
|
It is generated from these files:
|
||||||
github.com/containerd/containerd/api/types/descriptor/descriptor.proto
|
github.com/containerd/containerd/api/types/descriptor.proto
|
||||||
|
github.com/containerd/containerd/api/types/mount.proto
|
||||||
|
|
||||||
It has these top-level messages:
|
It has these top-level messages:
|
||||||
Descriptor
|
Descriptor
|
||||||
|
Mount
|
||||||
*/
|
*/
|
||||||
package descriptor
|
package types
|
||||||
|
|
||||||
import proto "github.com/gogo/protobuf/proto"
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -52,7 +54,7 @@ func (*Descriptor) ProtoMessage() {}
|
|||||||
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
|
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Descriptor)(nil), "containerd.v1.types.Descriptor")
|
proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
|
||||||
}
|
}
|
||||||
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
|
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
@ -399,24 +401,24 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor/descriptor.proto", fileDescriptorDescriptor)
|
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorDescriptor = []byte{
|
var fileDescriptorDescriptor = []byte{
|
||||||
// 229 bytes of a gzipped FileDescriptorProto
|
// 232 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
|
||||||
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x21, 0x31, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85,
|
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
|
||||||
0x84, 0x11, 0x3a, 0xf4, 0xca, 0x0c, 0xf5, 0xc0, 0x1a, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1,
|
0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
|
||||||
0xf2, 0xfa, 0x20, 0x16, 0x44, 0xa9, 0x52, 0x37, 0x23, 0x17, 0x97, 0x0b, 0x5c, 0xbf, 0x90, 0x2c,
|
0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
|
||||||
0x17, 0x57, 0x6e, 0x6a, 0x4a, 0x66, 0x62, 0x3c, 0x48, 0x8f, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67,
|
0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
|
||||||
0x10, 0x27, 0x58, 0x24, 0xa4, 0xb2, 0x20, 0x55, 0xc8, 0x8b, 0x8b, 0x2d, 0x25, 0x33, 0x3d, 0xb5,
|
0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
|
||||||
0xb8, 0x44, 0x82, 0x09, 0x24, 0xe5, 0x64, 0x74, 0xe2, 0x9e, 0x3c, 0xc3, 0xad, 0x7b, 0xf2, 0x5a,
|
0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
|
||||||
0x48, 0x0e, 0xcf, 0x2f, 0x48, 0xcd, 0x83, 0xdb, 0x5f, 0xac, 0x9f, 0x9e, 0xaf, 0x0b, 0xd1, 0xa2,
|
0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
|
||||||
0xe7, 0x02, 0xa6, 0x82, 0xa0, 0x26, 0x08, 0x09, 0x71, 0xb1, 0x14, 0x67, 0x56, 0xa5, 0x4a, 0x30,
|
0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
|
||||||
0x2b, 0x30, 0x6a, 0x30, 0x07, 0x81, 0xd9, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28,
|
0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
|
||||||
0xc7, 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c,
|
0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
|
||||||
0x92, 0x63, 0x4c, 0x62, 0x03, 0x3b, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x45, 0x60, 0xfd,
|
0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
|
||||||
0x5b, 0x23, 0x01, 0x00, 0x00,
|
0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00,
|
||||||
}
|
}
|
@ -1,9 +1,11 @@
 syntax = "proto3";
 
-package containerd.v1.types;
+package containerd.types;
 
 import "gogoproto/gogo.proto";
 
+option go_package = "github.com/containerd/containerd/api/types;types";
+
 // Descriptor describes a blob in a content store.
 //
 // This descriptor can be used to reference content from an
1	vendor/github.com/containerd/containerd/api/types/doc.go (generated, vendored, new file)
@ -0,0 +1 @@
+package types
@ -1,17 +1,8 @@
|
|||||||
// Code generated by protoc-gen-gogo.
|
// Code generated by protoc-gen-gogo.
|
||||||
// source: github.com/containerd/containerd/api/types/mount/mount.proto
|
// source: github.com/containerd/containerd/api/types/mount.proto
|
||||||
// DO NOT EDIT!
|
// DO NOT EDIT!
|
||||||
|
|
||||||
/*
|
package types
|
||||||
Package mount is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
github.com/containerd/containerd/api/types/mount/mount.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Mount
|
|
||||||
*/
|
|
||||||
package mount
|
|
||||||
|
|
||||||
import proto "github.com/gogo/protobuf/proto"
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
import fmt "fmt"
|
import fmt "fmt"
|
||||||
@ -28,12 +19,6 @@ var _ = proto.Marshal
|
|||||||
var _ = fmt.Errorf
|
var _ = fmt.Errorf
|
||||||
var _ = math.Inf
|
var _ = math.Inf
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
// Mount describes mounts for a container.
|
// Mount describes mounts for a container.
|
||||||
//
|
//
|
||||||
// This type is the lingua franca of ContainerD. All services provide mounts
|
// This type is the lingua franca of ContainerD. All services provide mounts
|
||||||
@ -58,7 +43,7 @@ func (*Mount) ProtoMessage() {}
|
|||||||
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
|
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterType((*Mount)(nil), "containerd.v1.types.Mount")
|
proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
|
||||||
}
|
}
|
||||||
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
@ -468,22 +453,22 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/mount/mount.proto", fileDescriptorMount)
|
proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptorMount = []byte{
|
var fileDescriptorMount = []byte{
|
||||||
// 197 bytes of a gzipped FileDescriptorProto
|
// 200 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x49, 0xcf, 0x2c, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
||||||
0xe6, 0x95, 0x40, 0x48, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21, 0x61, 0x84, 0x3a, 0xbd, 0x32,
|
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
|
||||||
0x43, 0x3d, 0xb0, 0x32, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0xbc, 0x3e, 0x88, 0x05, 0x51,
|
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
|
||||||
0xaa, 0x94, 0xca, 0xc5, 0xea, 0x0b, 0xd2, 0x29, 0x24, 0xc4, 0xc5, 0x02, 0x52, 0x27, 0xc1, 0xa8,
|
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
|
||||||
0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7,
|
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
|
||||||
0x4a, 0x30, 0x81, 0x45, 0xa1, 0x3c, 0x90, 0x78, 0x49, 0x62, 0x51, 0x7a, 0x6a, 0x89, 0x04, 0x33,
|
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
|
||||||
0x44, 0x1c, 0xc2, 0x13, 0x92, 0xe0, 0x62, 0xcf, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, 0x2b, 0x96, 0x60,
|
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
|
||||||
0x51, 0x60, 0xd6, 0xe0, 0x0c, 0x82, 0x71, 0x9d, 0x24, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50,
|
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
|
||||||
0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78,
|
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
|
||||||
0x24, 0xc7, 0x98, 0xc4, 0x06, 0x76, 0x87, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xda, 0x1c,
|
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
|
||||||
0x59, 0xf2, 0x00, 0x00, 0x00,
|
0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00,
|
||||||
}
|
}
|
@ -1,9 +1,11 @@
 syntax = "proto3";
 
-package containerd.v1.types;
+package containerd.types;
 
 import "gogoproto/gogo.proto";
 
+option go_package = "github.com/containerd/containerd/api/types;types";
+
 // Mount describes mounts for a container.
 //
 // This type is the lingua franca of ContainerD. All services provide mounts
1303	vendor/github.com/containerd/containerd/api/types/task/task.pb.go (generated, vendored)
File diff suppressed because it is too large

55	vendor/github.com/containerd/containerd/api/types/task/task.proto (generated, vendored)
@ -4,7 +4,6 @@ package containerd.v1.types;
 
 import "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
-import "google/protobuf/timestamp.proto";
 
 enum Status {
 	option (gogoproto.goproto_enum_prefix) = false;
@ -18,52 +17,12 @@
 }
 
 message Task {
-	string id = 1; // TODO(stevvooe): For now, this is just the container id.
-	string container_id = 2;
-	uint32 pid = 3;
-	Status status = 4;
-	google.protobuf.Any spec = 5;
-	string stdin = 6;
-	string stdout = 7;
-	string stderr = 8;
-	bool terminal = 9;
-}
-
-message Process {
-	uint32 pid = 1;
-	repeated string args = 2;
-	repeated string env = 3;
-	User user = 4;
-	string cwd = 5;
-	bool terminal = 6;
-	uint32 exit_status = 7;
-	Status status = 8;
-	google.protobuf.Any runtime_data = 9;
-	string stdin = 10;
-	string stdout = 11;
-	string stderr = 12;
-}
-
-message User {
-	uint32 uid = 1;
-	uint32 gid = 2;
-	repeated uint32 additional_gids = 3;
-}
-
-message Event {
 	string id = 1;
-
-	enum EventType {
-		EXIT = 0;
-		OOM = 1;
-		CREATE = 2;
-		START = 3;
-		EXEC_ADDED = 4;
-		PAUSED = 5;
-	}
-
-	EventType type = 2;
-	uint32 pid = 3;
-	uint32 exit_status = 4;
-	google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	uint32 pid = 2;
+	Status status = 3;
+	google.protobuf.Any spec = 4;
+	string stdin = 5;
+	string stdout = 6;
+	string stderr = 7;
+	bool terminal = 8;
 }
13	vendor/github.com/containerd/containerd/apparmor.go (generated, vendored, new file)
@ -0,0 +1,13 @@
+// +build linux
+
+package containerd
+
+import specs "github.com/opencontainers/runtime-spec/specs-go"
+
+// WithApparmor sets the provided apparmor profile to the spec
+func WithApparmorProfile(profile string) SpecOpts {
+	return func(s *specs.Spec) error {
+		s.Process.ApparmorProfile = profile
+		return nil
+	}
+}
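WithApparmorProfile is a SpecOpts, i.e. a func(*specs.Spec) error, so it can be applied to an OCI runtime spec before a task is created. A small sketch of applying it in isolation; the profile name is illustrative and Spec.Process is assumed to be populated, since the option writes through it.

// Sketch only: a SpecOpts is applied directly to a runtime-spec Spec value.
// The profile name is illustrative, and Spec.Process is assumed to be
// populated (the option dereferences it).
package example

import (
	"log"

	"github.com/containerd/containerd"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func applyProfile(spec *specs.Spec) {
	if err := containerd.WithApparmorProfile("example-profile")(spec); err != nil {
		log.Fatal(err)
	}
	log.Println("apparmor profile:", spec.Process.ApparmorProfile)
}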
279	vendor/github.com/containerd/containerd/client.go (generated, vendored)
@ -2,23 +2,31 @@ package containerd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/api/services/containers"
|
containersapi "github.com/containerd/containerd/api/services/containers/v1"
|
||||||
contentapi "github.com/containerd/containerd/api/services/content"
|
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
||||||
diffapi "github.com/containerd/containerd/api/services/diff"
|
diffapi "github.com/containerd/containerd/api/services/diff/v1"
|
||||||
"github.com/containerd/containerd/api/services/execution"
|
eventsapi "github.com/containerd/containerd/api/services/events/v1"
|
||||||
imagesapi "github.com/containerd/containerd/api/services/images"
|
imagesapi "github.com/containerd/containerd/api/services/images/v1"
|
||||||
namespacesapi "github.com/containerd/containerd/api/services/namespaces"
|
namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
|
||||||
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
|
snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
|
||||||
versionservice "github.com/containerd/containerd/api/services/version"
|
"github.com/containerd/containerd/api/services/tasks/v1"
|
||||||
|
versionservice "github.com/containerd/containerd/api/services/version/v1"
|
||||||
|
"github.com/containerd/containerd/containers"
|
||||||
"github.com/containerd/containerd/content"
|
"github.com/containerd/containerd/content"
|
||||||
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
|
"github.com/containerd/containerd/plugin"
|
||||||
|
"github.com/containerd/containerd/reference"
|
||||||
"github.com/containerd/containerd/remotes"
|
"github.com/containerd/containerd/remotes"
|
||||||
"github.com/containerd/containerd/remotes/docker"
|
"github.com/containerd/containerd/remotes/docker"
|
||||||
"github.com/containerd/containerd/remotes/docker/schema1"
|
"github.com/containerd/containerd/remotes/docker/schema1"
|
||||||
@ -28,9 +36,11 @@ import (
|
|||||||
imagesservice "github.com/containerd/containerd/services/images"
|
imagesservice "github.com/containerd/containerd/services/images"
|
||||||
snapshotservice "github.com/containerd/containerd/services/snapshot"
|
snapshotservice "github.com/containerd/containerd/services/snapshot"
|
||||||
"github.com/containerd/containerd/snapshot"
|
"github.com/containerd/containerd/snapshot"
|
||||||
|
"github.com/containerd/containerd/typeurl"
|
||||||
pempty "github.com/golang/protobuf/ptypes/empty"
|
pempty "github.com/golang/protobuf/ptypes/empty"
|
||||||
"github.com/opencontainers/image-spec/identity"
|
"github.com/opencontainers/image-spec/identity"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
@ -40,10 +50,18 @@ import (
|
|||||||
func init() {
|
func init() {
|
||||||
// reset the grpc logger so that it does not output in the STDIO of the calling process
|
// reset the grpc logger so that it does not output in the STDIO of the calling process
|
||||||
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||||
|
|
||||||
|
// register TypeUrls for commonly marshaled external types
|
||||||
|
major := strconv.Itoa(specs.VersionMajor)
|
||||||
|
typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
|
||||||
|
typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
|
||||||
|
typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
|
||||||
|
typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
|
||||||
}
|
}
|
||||||
|
|
||||||
type clientOpts struct {
|
type clientOpts struct {
|
||||||
defaultns string
|
defaultns string
|
||||||
|
dialOptions []grpc.DialOption
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClientOpt func(c *clientOpts) error
|
type ClientOpt func(c *clientOpts) error
|
||||||
@ -55,6 +73,14 @@ func WithDefaultNamespace(ns string) ClientOpt {
 	}
 }
 
+// WithDialOpts allows grpc.DialOptions to be set on the connection
+func WithDialOpts(opts []grpc.DialOption) ClientOpt {
+	return func(c *clientOpts) error {
+		c.dialOptions = opts
+		return nil
+	}
+}
+
 // New returns a new containerd client that is connected to the containerd
 // instance provided by address
 func New(address string, opts ...ClientOpt) (*Client, error) {
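Note that WithDialOpts replaces the client's default dial options outright (the hunk below overwrites gopts when any are supplied), so callers take over responsibility for options such as the insecure transport and the unix-socket dialer that New would otherwise install. A usage sketch under those assumptions; the socket path and namespace are placeholders.

// Usage sketch; the socket path and namespace are placeholders. Passing
// WithDialOpts replaces the client's default dial options, including the
// dialer New installs, so this only illustrates the shape of the call.
package example

import (
	"log"
	"time"

	"github.com/containerd/containerd"
	"google.golang.org/grpc"
)

func newClient() *containerd.Client {
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"),
		containerd.WithDialOpts([]grpc.DialOption{
			grpc.WithInsecure(),
			grpc.WithBlock(),
			grpc.WithTimeout(100 * time.Second),
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	return client
}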
@ -64,12 +90,16 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
gopts := []grpc.DialOption{
|
gopts := []grpc.DialOption{
|
||||||
|
grpc.WithBlock(),
|
||||||
grpc.WithInsecure(),
|
grpc.WithInsecure(),
|
||||||
grpc.WithTimeout(100 * time.Second),
|
grpc.WithTimeout(100 * time.Second),
|
||||||
|
grpc.FailOnNonTempDialError(true),
|
||||||
grpc.WithDialer(dialer),
|
grpc.WithDialer(dialer),
|
||||||
}
|
}
|
||||||
|
if len(copts.dialOptions) > 0 {
|
||||||
|
gopts = copts.dialOptions
|
||||||
|
}
|
||||||
if copts.defaultns != "" {
|
if copts.defaultns != "" {
|
||||||
unary, stream := newNSInterceptors(copts.defaultns)
|
unary, stream := newNSInterceptors(copts.defaultns)
|
||||||
gopts = append(gopts,
|
gopts = append(gopts,
|
||||||
@ -77,14 +107,19 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||||||
grpc.WithStreamInterceptor(stream),
|
grpc.WithStreamInterceptor(stream),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
conn, err := grpc.Dial(dialAddress(address), gopts...)
|
conn, err := grpc.Dial(dialAddress(address), gopts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
||||||
}
|
}
|
||||||
|
return NewWithConn(conn, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWithConn returns a new containerd client that is connected to the containerd
|
||||||
|
// instance provided by the connection
|
||||||
|
func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
|
||||||
return &Client{
|
return &Client{
|
||||||
conn: conn,
|
conn: conn,
|
||||||
runtime: runtime.GOOS,
|
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -106,14 +141,14 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Containers returns all containers created in containerd
|
// Containers returns all containers created in containerd
|
||||||
func (c *Client) Containers(ctx context.Context) ([]Container, error) {
|
func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
|
||||||
r, err := c.ContainerService().List(ctx, &containers.ListContainersRequest{})
|
r, err := c.ContainerService().List(ctx, filters...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var out []Container
|
var out []Container
|
||||||
for _, container := range r.Containers {
|
for _, container := range r {
|
||||||
out = append(out, containerFromProto(c, container))
|
out = append(out, containerFromRecord(c, container))
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
@ -128,11 +163,11 @@ func WithContainerLabels(labels map[string]string) NewContainerOpts {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithExistingRootFS uses an existing root filesystem for the container
|
// WithSnapshot uses an existing root filesystem for the container
|
||||||
func WithExistingRootFS(id string) NewContainerOpts {
|
func WithSnapshot(id string) NewContainerOpts {
|
||||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||||
// check that the snapshot exists, if not, fail on creation
|
// check that the snapshot exists, if not, fail on creation
|
||||||
if _, err := client.SnapshotService().Mounts(ctx, id); err != nil {
|
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.RootFS = id
|
c.RootFS = id
|
||||||
@ -140,41 +175,54 @@ func WithExistingRootFS(id string) NewContainerOpts {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithNewRootFS allocates a new snapshot to be used by the container as the
|
// WithNewSnapshot allocates a new snapshot to be used by the container as the
|
||||||
// root filesystem in read-write mode
|
// root filesystem in read-write mode
|
||||||
func WithNewRootFS(id string, i Image) NewContainerOpts {
|
func WithNewSnapshot(id string, i Image) NewContainerOpts {
|
||||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := client.SnapshotService().Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.RootFS = id
|
c.RootFS = id
|
||||||
|
c.Image = i.Name()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithNewReadonlyRootFS allocates a new snapshot to be used by the container as the
|
// WithNewSnapshotView allocates a new snapshot to be used by the container as the
|
||||||
// root filesystem in read-only mode
|
// root filesystem in read-only mode
|
||||||
func WithNewReadonlyRootFS(id string, i Image) NewContainerOpts {
|
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
|
||||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := client.SnapshotService().View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.RootFS = id
|
c.RootFS = id
|
||||||
|
c.Image = i.Name()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithRuntime allows a user to specify the runtime name and additional options that should
|
||||||
|
// be used to create tasks for the container
|
||||||
func WithRuntime(name string) NewContainerOpts {
|
func WithRuntime(name string) NewContainerOpts {
|
||||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||||
c.Runtime = name
|
c.Runtime = containers.RuntimeInfo{
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithSnapshotter(name string) NewContainerOpts {
|
||||||
|
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||||
|
c.Snapshotter = name
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -191,30 +239,28 @@ func WithImage(i Image) NewContainerOpts {
|
|||||||
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
|
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
|
||||||
container := containers.Container{
|
container := containers.Container{
|
||||||
ID: id,
|
ID: id,
|
||||||
Runtime: c.runtime,
|
Runtime: containers.RuntimeInfo{
|
||||||
|
Name: c.runtime,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
if err := o(ctx, c, &container); err != nil {
|
if err := o(ctx, c, &container); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
r, err := c.ContainerService().Create(ctx, &containers.CreateContainerRequest{
|
r, err := c.ContainerService().Create(ctx, container)
|
||||||
Container: container,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return containerFromProto(c, r.Container), nil
|
return containerFromRecord(c, r), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
|
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
|
||||||
response, err := c.ContainerService().Get(ctx, &containers.GetContainerRequest{
|
r, err := c.ContainerService().Get(ctx, id)
|
||||||
ID: id,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return containerFromProto(c, response.Container), nil
|
return containerFromRecord(c, r), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type RemoteOpts func(*Client, *RemoteContext) error
|
type RemoteOpts func(*Client, *RemoteContext) error
|
||||||
@ -231,6 +277,9 @@ type RemoteContext struct {
|
|||||||
// afterwards. Unpacking is required to run an image.
|
// afterwards. Unpacking is required to run an image.
|
||||||
Unpack bool
|
Unpack bool
|
||||||
|
|
||||||
|
// Snapshotter used for unpacking
|
||||||
|
Snapshotter string
|
||||||
|
|
||||||
// BaseHandlers are a set of handlers which get are called on dispatch.
|
// BaseHandlers are a set of handlers which get are called on dispatch.
|
||||||
// These handlers always get called before any operation specific
|
// These handlers always get called before any operation specific
|
||||||
// handlers.
|
// handlers.
|
||||||
@ -258,6 +307,14 @@ func WithPullUnpack(client *Client, c *RemoteContext) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithPullSnapshotter specifies snapshotter name used for unpacking
|
||||||
|
func WithPullSnapshotter(snapshotterName string) RemoteOpts {
|
||||||
|
return func(client *Client, c *RemoteContext) error {
|
||||||
|
c.Snapshotter = snapshotterName
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithSchema1Conversion is used to convert Docker registry schema 1
|
// WithSchema1Conversion is used to convert Docker registry schema 1
|
||||||
// manifests to oci manifests on pull. Without this option schema 1
|
// manifests to oci manifests on pull. Without this option schema 1
|
||||||
// manifests will return a not supported error.
|
// manifests will return a not supported error.
|
||||||
@ -324,20 +381,33 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpts) (Imag
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
imgrec := images.Image{
|
||||||
|
Name: name,
|
||||||
|
Target: desc,
|
||||||
|
}
|
||||||
|
|
||||||
is := c.ImageService()
|
is := c.ImageService()
|
||||||
if err := is.Put(ctx, name, desc); err != nil {
|
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
|
||||||
|
if !errdefs.IsNotFound(err) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
i, err := is.Get(ctx, name)
|
|
||||||
|
created, err := is.Create(ctx, imgrec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
imgrec = created
|
||||||
|
} else {
|
||||||
|
imgrec = updated
|
||||||
|
}
|
||||||
|
|
||||||
img := &image{
|
img := &image{
|
||||||
client: c,
|
client: c,
|
||||||
i: i,
|
i: imgrec,
|
||||||
}
|
}
|
||||||
if pullCtx.Unpack {
|
if pullCtx.Unpack {
|
||||||
if err := img.Unpack(ctx); err != nil {
|
if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -433,20 +503,20 @@ func (c *Client) NamespaceService() namespacesapi.NamespacesClient {
 	return namespacesapi.NewNamespacesClient(c.conn)
 }
 
-func (c *Client) ContainerService() containers.ContainersClient {
-	return containers.NewContainersClient(c.conn)
+func (c *Client) ContainerService() containers.Store {
+	return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
 }
 
 func (c *Client) ContentStore() content.Store {
 	return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn))
 }
 
-func (c *Client) SnapshotService() snapshot.Snapshotter {
-	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(c.conn))
+func (c *Client) SnapshotService(snapshotterName string) snapshot.Snapshotter {
+	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotsClient(c.conn), snapshotterName)
 }
 
-func (c *Client) TaskService() execution.TasksClient {
-	return execution.NewTasksClient(c.conn)
+func (c *Client) TaskService() tasks.TasksClient {
+	return tasks.NewTasksClient(c.conn)
 }
 
 func (c *Client) ImageService() images.Store {
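ContainerService now returns a containers.Store rather than a raw gRPC client, and SnapshotService requires an explicit snapshotter name. A hedged sketch of the new accessor signatures; the snapshotter name and the helper are illustrative.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
)

// listRootFS walks container records through the reworked accessors:
// ContainerService yields a containers.Store, and SnapshotService must be
// given a snapshotter name. "overlayfs" and the helper are illustrative.
func listRootFS(ctx context.Context, client *containerd.Client) error {
	ctrs, err := client.ContainerService().List(ctx)
	if err != nil {
		return err
	}
	sn := client.SnapshotService("overlayfs")
	for _, c := range ctrs {
		if c.RootFS == "" {
			continue
		}
		// Resolve the mounts backing the container's rootfs snapshot.
		if _, err := sn.Mounts(ctx, c.RootFS); err != nil {
			return err
		}
	}
	return nil
}
```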
@ -461,6 +531,10 @@ func (c *Client) HealthService() grpc_health_v1.HealthClient {
 	return grpc_health_v1.NewHealthClient(c.conn)
 }
 
+func (c *Client) EventService() eventsapi.EventsClient {
+	return eventsapi.NewEventsClient(c.conn)
+}
+
 func (c *Client) VersionService() versionservice.VersionClient {
 	return versionservice.NewVersionClient(c.conn)
 }
@ -480,3 +554,120 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
|
|||||||
Revision: response.Revision,
|
Revision: response.Revision,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type imageFormat string
|
||||||
|
|
||||||
|
const (
|
||||||
|
ociImageFormat imageFormat = "oci"
|
||||||
|
)
|
||||||
|
|
||||||
|
type importOpts struct {
|
||||||
|
format imageFormat
|
||||||
|
refObject string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ImportOpt func(c *importOpts) error
|
||||||
|
|
||||||
|
func WithOCIImportFormat() ImportOpt {
|
||||||
|
return func(c *importOpts) error {
|
||||||
|
if c.format != "" {
|
||||||
|
return errors.New("format already set")
|
||||||
|
}
|
||||||
|
c.format = ociImageFormat
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRefObject specifies the ref object to import.
|
||||||
|
// If refObject is empty, it is copied from the ref argument of Import().
|
||||||
|
func WithRefObject(refObject string) ImportOpt {
|
||||||
|
return func(c *importOpts) error {
|
||||||
|
c.refObject = refObject
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
|
||||||
|
var iopts importOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(&iopts); err != nil {
|
||||||
|
return iopts, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// use OCI as the default format
|
||||||
|
if iopts.format == "" {
|
||||||
|
iopts.format = ociImageFormat
|
||||||
|
}
|
||||||
|
// if refObject is not explicitly specified, use the one specified in ref
|
||||||
|
if iopts.refObject == "" {
|
||||||
|
refSpec, err := reference.Parse(ref)
|
||||||
|
if err != nil {
|
||||||
|
return iopts, err
|
||||||
|
}
|
||||||
|
iopts.refObject = refSpec.Object
|
||||||
|
}
|
||||||
|
return iopts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Import imports an image from a Tar stream using reader.
|
||||||
|
// OCI format is assumed by default.
|
||||||
|
//
|
||||||
|
// Note that unreferenced blobs are imported to the content store as well.
|
||||||
|
func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
|
||||||
|
iopts, err := resolveImportOpt(ref, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch iopts.format {
|
||||||
|
case ociImageFormat:
|
||||||
|
return c.importFromOCITar(ctx, ref, reader, iopts)
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("unsupported format: %s", iopts.format)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type exportOpts struct {
|
||||||
|
format imageFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExportOpt func(c *exportOpts) error
|
||||||
|
|
||||||
|
func WithOCIExportFormat() ExportOpt {
|
||||||
|
return func(c *exportOpts) error {
|
||||||
|
if c.format != "" {
|
||||||
|
return errors.New("format already set")
|
||||||
|
}
|
||||||
|
c.format = ociImageFormat
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
|
||||||
|
// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
|
||||||
|
// -> application/vnd.oci.image.layer.v1.tar+gzip
|
||||||
|
|
||||||
|
// Export exports an image to a Tar stream.
|
||||||
|
// OCI format is used by default.
|
||||||
|
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
|
||||||
|
func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
|
||||||
|
var eopts exportOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(&eopts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// use OCI as the default format
|
||||||
|
if eopts.format == "" {
|
||||||
|
eopts.format = ociImageFormat
|
||||||
|
}
|
||||||
|
pr, pw := io.Pipe()
|
||||||
|
switch eopts.format {
|
||||||
|
case ociImageFormat:
|
||||||
|
go func() {
|
||||||
|
pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
|
||||||
|
}()
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("unsupported format: %s", eopts.format)
|
||||||
|
}
|
||||||
|
return pr, nil
|
||||||
|
}
|
||||||
175 vendor/github.com/containerd/containerd/container.go generated vendored
@ -4,34 +4,34 @@ import (
 	"context"
 	"encoding/json"
 	"path/filepath"
+	"strings"
 	"sync"
 
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"github.com/containerd/containerd/api/services/containers"
-	"github.com/containerd/containerd/api/services/execution"
-	"github.com/containerd/containerd/api/types/mount"
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/typeurl"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
 
-var (
-	ErrNoImage       = errors.New("container does not have an image")
-	ErrNoRunningTask = errors.New("no running task")
-)
+type DeleteOpts func(context.Context, *Client, containers.Container) error
 
 type Container interface {
 	ID() string
-	Proto() containers.Container
-	Delete(context.Context) error
+	Info() containers.Container
+	Delete(context.Context, ...DeleteOpts) error
 	NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error)
 	Spec() (*specs.Spec, error)
 	Task(context.Context, IOAttach) (Task, error)
 	Image(context.Context) (Image, error)
+	Labels(context.Context) (map[string]string, error)
+	SetLabels(context.Context, map[string]string) (map[string]string, error)
 }
 
-func containerFromProto(client *Client, c containers.Container) *container {
+func containerFromRecord(client *Client, c containers.Container) *container {
 	return &container{
 		client: client,
 		c:      c,
@ -45,7 +45,6 @@ type container struct {
 
 	client *Client
 	c      containers.Container
-	task   *task
 }
 
 // ID returns the container's unique id
@ -53,10 +52,53 @@ func (c *container) ID() string {
 	return c.c.ID
 }
 
-func (c *container) Proto() containers.Container {
+func (c *container) Info() containers.Container {
 	return c.c
 }
 
+func (c *container) Labels(ctx context.Context) (map[string]string, error) {
+	r, err := c.client.ContainerService().Get(ctx, c.ID())
+	if err != nil {
+		return nil, err
+	}
+
+	c.c = r
+
+	m := make(map[string]string, len(r.Labels))
+	for k, v := range c.c.Labels {
+		m[k] = v
+	}
+
+	return m, nil
+}
+
+func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
+	container := containers.Container{
+		ID:     c.ID(),
+		Labels: labels,
+	}
+
+	var paths []string
+	// mask off paths so we only muck with the labels encountered in labels.
+	// Labels not in the passed in argument will be left alone.
+	for k := range labels {
+		paths = append(paths, strings.Join([]string{"labels", k}, "."))
+	}
+
+	r, err := c.client.ContainerService().Update(ctx, container, paths...)
+	if err != nil {
+		return nil, err
+	}
+
+	c.c = r // update our local container
+
+	m := make(map[string]string, len(r.Labels))
+	for k, v := range c.c.Labels {
+		m[k] = v
+	}
+	return m, nil
+}
+
 // Spec returns the current OCI specification for the container
 func (c *container) Spec() (*specs.Spec, error) {
 	var s specs.Spec
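SetLabels builds a field mask of "labels.<key>" paths, so only the keys passed in are mutated and other labels survive the update. A small sketch against the Container interface defined in this file; the label key and value are placeholders.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
)

// tagContainer sets a single label; because SetLabels masks the update to
// "labels.<key>" paths, labels that are not passed in are left untouched.
// The key and value are placeholders.
func tagContainer(ctx context.Context, c containerd.Container) (map[string]string, error) {
	if _, err := c.SetLabels(ctx, map[string]string{"example.com/owner": "demo"}); err != nil {
		return nil, err
	}
	// Labels re-reads the record from the containers service, so the result
	// includes both pre-existing and newly set keys.
	return c.Labels(ctx)
}
```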
@ -66,43 +108,44 @@ func (c *container) Spec() (*specs.Spec, error) {
 	return &s, nil
 }
 
+// WithSnapshotCleanup deletes the rootfs allocated for the container
+func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
+	if c.RootFS != "" {
+		return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS)
+	}
+	return nil
+}
+
 // Delete deletes an existing container
 // an error is returned if the container has running tasks
-func (c *container) Delete(ctx context.Context) (err error) {
-	// TODO: should the client be the one removing resources attached
-	// to the container at the moment before we have GC?
-	if c.c.RootFS != "" {
-		err = c.client.SnapshotService().Remove(ctx, c.c.RootFS)
-	}
-	if _, cerr := c.client.ContainerService().Delete(ctx, &containers.DeleteContainerRequest{
-		ID: c.c.ID,
-	}); err == nil {
+func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) (err error) {
+	if _, err := c.Task(ctx, nil); err == nil {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.ID())
+	}
+	for _, o := range opts {
+		if err := o(ctx, c.client, c.c); err != nil {
+			return err
+		}
+	}
+
+	if cerr := c.client.ContainerService().Delete(ctx, c.ID()); err == nil {
 		err = cerr
 	}
 	return err
 }
 
 func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.task == nil {
-		t, err := c.loadTask(ctx, attach)
-		if err != nil {
-			return nil, err
-		}
-		c.task = t.(*task)
-	}
-	return c.task, nil
+	return c.loadTask(ctx, attach)
 }
 
 // Image returns the image that the container is based on
 func (c *container) Image(ctx context.Context) (Image, error) {
 	if c.c.Image == "" {
-		return nil, ErrNoImage
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "container not created from an image")
 	}
 	i, err := c.client.ImageService().Get(ctx, c.c.Image)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "failed to get image for container")
 	}
 	return &image{
 		client: c.client,
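Delete now refuses to remove a container whose task is still running and delegates resource cleanup to DeleteOpts such as WithSnapshotCleanup above. A sketch of the new call shape; the helper function itself is illustrative.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
)

// removeContainer shows the new Delete shape: the caller opts in to rootfs
// cleanup with WithSnapshotCleanup instead of the client removing the
// snapshot implicitly; a still-running task makes Delete fail up front.
func removeContainer(ctx context.Context, c containerd.Container) error {
	return c.Delete(ctx, containerd.WithSnapshotCleanup)
}
```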
@ -110,16 +153,23 @@ func (c *container) Image(ctx context.Context) (Image, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type NewTaskOpts func(context.Context, *Client, *execution.CreateRequest) error
|
type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
|
||||||
|
|
||||||
|
func WithRootFS(mounts []mount.Mount) NewTaskOpts {
|
||||||
|
return func(ctx context.Context, c *Client, ti *TaskInfo) error {
|
||||||
|
ti.RootFS = mounts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
|
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
i, err := ioCreate()
|
i, err := ioCreate(c.c.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
request := &execution.CreateRequest{
|
request := &tasks.CreateTaskRequest{
|
||||||
ContainerID: c.c.ID,
|
ContainerID: c.c.ID,
|
||||||
Terminal: i.Terminal,
|
Terminal: i.Terminal,
|
||||||
Stdin: i.Stdin,
|
Stdin: i.Stdin,
|
||||||
@ -128,31 +178,47 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
|
|||||||
}
|
}
|
||||||
if c.c.RootFS != "" {
|
if c.c.RootFS != "" {
|
||||||
// get the rootfs from the snapshotter and add it to the request
|
// get the rootfs from the snapshotter and add it to the request
|
||||||
mounts, err := c.client.SnapshotService().Mounts(ctx, c.c.RootFS)
|
mounts, err := c.client.SnapshotService(c.c.Snapshotter).Mounts(ctx, c.c.RootFS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
for _, m := range mounts {
|
for _, m := range mounts {
|
||||||
request.Rootfs = append(request.Rootfs, &mount.Mount{
|
request.Rootfs = append(request.Rootfs, &types.Mount{
|
||||||
Type: m.Type,
|
Type: m.Type,
|
||||||
Source: m.Source,
|
Source: m.Source,
|
||||||
Options: m.Options,
|
Options: m.Options,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
var info TaskInfo
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
if err := o(ctx, c.client, request); err != nil {
|
if err := o(ctx, c.client, &info); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if info.RootFS != nil {
|
||||||
|
for _, m := range info.RootFS {
|
||||||
|
request.Rootfs = append(request.Rootfs, &types.Mount{
|
||||||
|
Type: m.Type,
|
||||||
|
Source: m.Source,
|
||||||
|
Options: m.Options,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if info.Options != nil {
|
||||||
|
any, err := typeurl.MarshalAny(info.Options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
request.Options = any
|
||||||
|
}
|
||||||
t := &task{
|
t := &task{
|
||||||
client: c.client,
|
client: c.client,
|
||||||
io: i,
|
io: i,
|
||||||
containerID: c.ID(),
|
id: c.ID(),
|
||||||
pidSync: make(chan struct{}),
|
|
||||||
}
|
}
|
||||||
|
if info.Checkpoint != nil {
|
||||||
if request.Checkpoint != nil {
|
request.Checkpoint = info.Checkpoint
|
||||||
// we need to defer the create call to start
|
// we need to defer the create call to start
|
||||||
t.deferred = request
|
t.deferred = request
|
||||||
} else {
|
} else {
|
||||||
@ -161,26 +227,25 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.pid = response.Pid
|
t.pid = response.Pid
|
||||||
close(t.pidSync)
|
|
||||||
}
|
}
|
||||||
c.task = t
|
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
|
func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
|
||||||
response, err := c.client.TaskService().Info(ctx, &execution.InfoRequest{
|
response, err := c.client.TaskService().Get(ctx, &tasks.GetTaskRequest{
|
||||||
ContainerID: c.c.ID,
|
ContainerID: c.c.ID,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if grpc.Code(errors.Cause(err)) == codes.NotFound {
|
err = errdefs.FromGRPC(err)
|
||||||
return nil, ErrNoRunningTask
|
if errdefs.IsNotFound(err) {
|
||||||
|
return nil, errors.Wrapf(err, "no running task found")
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var i *IO
|
var i *IO
|
||||||
if ioAttach != nil {
|
if ioAttach != nil {
|
||||||
// get the existing fifo paths from the task information stored by the daemon
|
// get the existing fifo paths from the task information stored by the daemon
|
||||||
paths := &FifoSet{
|
paths := &FIFOSet{
|
||||||
Dir: getFifoDir([]string{
|
Dir: getFifoDir([]string{
|
||||||
response.Task.Stdin,
|
response.Task.Stdin,
|
||||||
response.Task.Stdout,
|
response.Task.Stdout,
|
||||||
@ -195,18 +260,12 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// create and close a channel on load as we already have the pid
|
|
||||||
// and don't want to block calls to Wait(), etc...
|
|
||||||
ps := make(chan struct{})
|
|
||||||
close(ps)
|
|
||||||
t := &task{
|
t := &task{
|
||||||
client: c.client,
|
client: c.client,
|
||||||
io: i,
|
io: i,
|
||||||
containerID: response.Task.ContainerID,
|
id: response.Task.ID,
|
||||||
pid: response.Task.Pid,
|
pid: response.Task.Pid,
|
||||||
pidSync: ps,
|
|
||||||
}
|
}
|
||||||
c.task = t
|
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
26
vendor/github.com/containerd/containerd/container_unix.go
generated
vendored
26
vendor/github.com/containerd/containerd/container_unix.go
generated
vendored
@ -8,17 +8,16 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
"github.com/containerd/containerd/api/services/containers"
|
"github.com/containerd/containerd/api/types"
|
||||||
"github.com/containerd/containerd/api/services/execution"
|
"github.com/containerd/containerd/containers"
|
||||||
"github.com/containerd/containerd/api/types/descriptor"
|
|
||||||
"github.com/containerd/containerd/content"
|
"github.com/containerd/containerd/content"
|
||||||
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/containerd/containerd/snapshot"
|
"github.com/gogo/protobuf/proto"
|
||||||
protobuf "github.com/gogo/protobuf/types"
|
protobuf "github.com/gogo/protobuf/types"
|
||||||
digest "github.com/opencontainers/go-digest"
|
digest "github.com/opencontainers/go-digest"
|
||||||
"github.com/opencontainers/image-spec/identity"
|
"github.com/opencontainers/image-spec/identity"
|
||||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
||||||
@ -45,8 +44,8 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := client.SnapshotService().Prepare(ctx, rootfsID, identity.ChainID(diffIDs).String()); err != nil {
|
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, rootfsID, identity.ChainID(diffIDs).String()); err != nil {
|
||||||
if !snapshot.IsExist(err) {
|
if !errdefs.IsAlreadyExists(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -61,15 +60,16 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.Spec = &protobuf.Any{
|
var any protobuf.Any
|
||||||
TypeUrl: specs.Version,
|
if err := proto.Unmarshal(data, &any); err != nil {
|
||||||
Value: data,
|
return err
|
||||||
}
|
}
|
||||||
|
c.Spec = &any
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if rw != nil {
|
if rw != nil {
|
||||||
// apply the rw snapshot to the new rw layer
|
// apply the rw snapshot to the new rw layer
|
||||||
mounts, err := client.SnapshotService().Mounts(ctx, rootfsID)
|
mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, rootfsID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -83,7 +83,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
|
func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
|
||||||
return func(ctx context.Context, c *Client, r *execution.CreateRequest) error {
|
return func(ctx context.Context, c *Client, info *TaskInfo) error {
|
||||||
id := desc.Digest
|
id := desc.Digest
|
||||||
index, err := decodeIndex(ctx, c.ContentStore(), id)
|
index, err := decodeIndex(ctx, c.ContentStore(), id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -91,7 +91,7 @@ func WithTaskCheckpoint(desc v1.Descriptor) NewTaskOpts {
|
|||||||
}
|
}
|
||||||
for _, m := range index.Manifests {
|
for _, m := range index.Manifests {
|
||||||
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
|
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
|
||||||
r.Checkpoint = &descriptor.Descriptor{
|
info.Checkpoint = &types.Descriptor{
|
||||||
MediaType: m.MediaType,
|
MediaType: m.MediaType,
|
||||||
Size_: m.Size,
|
Size_: m.Size,
|
||||||
Digest: m.Digest,
|
Digest: m.Digest,
|
||||||
|
13 vendor/github.com/containerd/containerd/containerd.service generated vendored Normal file
@ -0,0 +1,13 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+Delegate=yes
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
25 vendor/github.com/containerd/containerd/containers/containers.go generated vendored
@ -3,6 +3,8 @@ package containers
 import (
 	"context"
 	"time"
+
+	"github.com/gogo/protobuf/types"
 )
 
 // Container represents the set of data pinned by a container. Unless otherwise
@ -13,17 +15,32 @@ type Container struct {
 	ID     string
 	Labels map[string]string
 	Image  string
-	Runtime string
-	Spec    []byte
+	Runtime     RuntimeInfo
+	Spec        *types.Any
 	RootFS      string
+	Snapshotter string
 	CreatedAt   time.Time
 	UpdatedAt   time.Time
 }
 
+type RuntimeInfo struct {
+	Name    string
+	Options *types.Any
+}
+
 type Store interface {
 	Get(ctx context.Context, id string) (Container, error)
-	List(ctx context.Context, filter string) ([]Container, error)
+
+	// List returns containers that match one or more of the provided filters.
+	List(ctx context.Context, filters ...string) ([]Container, error)
+
 	Create(ctx context.Context, container Container) (Container, error)
-	Update(ctx context.Context, container Container) (Container, error)
+
+	// Update the container with the provided container object. ID must be set.
+	//
+	// If one or more fieldpaths are provided, only the field corresponding to
+	// the fieldpaths will be mutated.
+	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
+
 	Delete(ctx context.Context, id string) error
 }
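Store.Update takes optional field paths, which is exactly what container.SetLabels in container.go relies on. A sketch of a masked label update through the interface above; the key and value are placeholders.

```go
package example

import (
	"context"

	"github.com/containerd/containerd/containers"
)

// setOneLabel performs a masked update through the Store interface above:
// only the "labels.<key>" field path is touched, so other fields and other
// label keys on the stored record are preserved. Key and value are
// placeholders.
func setOneLabel(ctx context.Context, store containers.Store, id, key, value string) (containers.Container, error) {
	c := containers.Container{
		ID:     id,
		Labels: map[string]string{key: value},
	}
	return store.Update(ctx, c, "labels."+key)
}
```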
|
130
vendor/github.com/containerd/containerd/containerstore.go
generated
vendored
Normal file
130
vendor/github.com/containerd/containerd/containerstore.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
package containerd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
containersapi "github.com/containerd/containerd/api/services/containers/v1"
|
||||||
|
"github.com/containerd/containerd/containers"
|
||||||
|
"github.com/containerd/containerd/errdefs"
|
||||||
|
ptypes "github.com/gogo/protobuf/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type remoteContainers struct {
|
||||||
|
client containersapi.ContainersClient
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ containers.Store = &remoteContainers{}
|
||||||
|
|
||||||
|
func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
|
||||||
|
return &remoteContainers{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
|
||||||
|
resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
|
||||||
|
ID: id,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return containers.Container{}, errdefs.FromGRPC(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return containerFromProto(&resp.Container), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
|
||||||
|
resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
|
||||||
|
Filters: filters,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errdefs.FromGRPC(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return containersFromProto(resp.Containers), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
|
||||||
|
created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
|
||||||
|
Container: containerToProto(&container),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return containers.Container{}, errdefs.FromGRPC(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return containerFromProto(&created.Container), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
|
||||||
|
var updateMask *ptypes.FieldMask
|
||||||
|
if len(fieldpaths) > 0 {
|
||||||
|
updateMask = &ptypes.FieldMask{
|
||||||
|
Paths: fieldpaths,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
|
||||||
|
Container: containerToProto(&container),
|
||||||
|
UpdateMask: updateMask,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return containers.Container{}, errdefs.FromGRPC(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return containerFromProto(&updated.Container), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteContainers) Delete(ctx context.Context, id string) error {
|
||||||
|
_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
|
||||||
|
ID: id,
|
||||||
|
})
|
||||||
|
|
||||||
|
return errdefs.FromGRPC(err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerToProto(container *containers.Container) containersapi.Container {
|
||||||
|
return containersapi.Container{
|
||||||
|
ID: container.ID,
|
||||||
|
Labels: container.Labels,
|
||||||
|
Image: container.Image,
|
||||||
|
Runtime: &containersapi.Container_Runtime{
|
||||||
|
Name: container.Runtime.Name,
|
||||||
|
Options: container.Runtime.Options,
|
||||||
|
},
|
||||||
|
Spec: container.Spec,
|
||||||
|
Snapshotter: container.Snapshotter,
|
||||||
|
RootFS: container.RootFS,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerFromProto(containerpb *containersapi.Container) containers.Container {
|
||||||
|
var runtime containers.RuntimeInfo
|
||||||
|
if containerpb.Runtime != nil {
|
||||||
|
runtime = containers.RuntimeInfo{
|
||||||
|
Name: containerpb.Runtime.Name,
|
||||||
|
Options: containerpb.Runtime.Options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return containers.Container{
|
||||||
|
ID: containerpb.ID,
|
||||||
|
Labels: containerpb.Labels,
|
||||||
|
Image: containerpb.Image,
|
||||||
|
Runtime: runtime,
|
||||||
|
Spec: containerpb.Spec,
|
||||||
|
Snapshotter: containerpb.Snapshotter,
|
||||||
|
RootFS: containerpb.RootFS,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func containersFromProto(containerspb []containersapi.Container) []containers.Container {
|
||||||
|
var containers []containers.Container
|
||||||
|
|
||||||
|
for _, container := range containerspb {
|
||||||
|
containers = append(containers, containerFromProto(&container))
|
||||||
|
}
|
||||||
|
|
||||||
|
return containers
|
||||||
|
}
|
79
vendor/github.com/containerd/containerd/content/content.go
generated
vendored
79
vendor/github.com/containerd/containerd/content/content.go
generated
vendored
@ -3,35 +3,10 @@ package content
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/oci"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNotFound is returned when an item is not found.
|
|
||||||
//
|
|
||||||
// Use IsNotFound(err) to detect this condition.
|
|
||||||
ErrNotFound = errors.New("content: not found")
|
|
||||||
|
|
||||||
// ErrExists is returned when something exists when it may not be expected.
|
|
||||||
//
|
|
||||||
// Use IsExists(err) to detect this condition.
|
|
||||||
ErrExists = errors.New("content: exists")
|
|
||||||
|
|
||||||
// ErrLocked is returned when content is actively being uploaded, this
|
|
||||||
// indicates that another process is attempting to upload the same content.
|
|
||||||
//
|
|
||||||
// Use IsLocked(err) to detect this condition.
|
|
||||||
ErrLocked = errors.New("content: locked")
|
|
||||||
|
|
||||||
bufPool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return make([]byte, 1<<20)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Provider interface {
|
type Provider interface {
|
||||||
@ -48,7 +23,9 @@ type Ingester interface {
|
|||||||
type Info struct {
|
type Info struct {
|
||||||
Digest digest.Digest
|
Digest digest.Digest
|
||||||
Size int64
|
Size int64
|
||||||
CommittedAt time.Time
|
CreatedAt time.Time
|
||||||
|
UpdatedAt time.Time
|
||||||
|
Labels map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
type Status struct {
|
type Status struct {
|
||||||
@ -70,32 +47,39 @@ type Manager interface {
|
|||||||
// If the content is not present, ErrNotFound will be returned.
|
// If the content is not present, ErrNotFound will be returned.
|
||||||
Info(ctx context.Context, dgst digest.Digest) (Info, error)
|
Info(ctx context.Context, dgst digest.Digest) (Info, error)
|
||||||
|
|
||||||
// Walk will call fn for each item in the content store.
|
// Update updates mutable information related to content.
|
||||||
Walk(ctx context.Context, fn WalkFunc) error
|
// If one or more fieldpaths are provided, only those
|
||||||
|
// fields will be updated.
|
||||||
|
// Mutable fields:
|
||||||
|
// labels.*
|
||||||
|
Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
|
||||||
|
|
||||||
|
// Walk will call fn for each item in the content store which
|
||||||
|
// match the provided filters. If no filters are given all
|
||||||
|
// items will be walked.
|
||||||
|
Walk(ctx context.Context, fn WalkFunc, filters ...string) error
|
||||||
|
|
||||||
// Delete removes the content from the store.
|
// Delete removes the content from the store.
|
||||||
Delete(ctx context.Context, dgst digest.Digest) error
|
Delete(ctx context.Context, dgst digest.Digest) error
|
||||||
|
}
|
||||||
|
|
||||||
// Status returns the status of any active ingestions whose ref match the
|
// IngestManager provides methods for managing ingests.
|
||||||
|
type IngestManager interface {
|
||||||
|
// Status returns the status of the provided ref.
|
||||||
|
Status(ctx context.Context, ref string) (Status, error)
|
||||||
|
|
||||||
|
// ListStatuses returns the status of any active ingestions whose ref match the
|
||||||
// provided regular expression. If empty, all active ingestions will be
|
// provided regular expression. If empty, all active ingestions will be
|
||||||
// returned.
|
// returned.
|
||||||
//
|
ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
|
||||||
// TODO(stevvooe): Status may be slighly out of place here. If this remains
|
|
||||||
// here, we should remove Manager and just define these on store.
|
|
||||||
Status(ctx context.Context, re string) ([]Status, error)
|
|
||||||
|
|
||||||
// Abort completely cancels the ingest operation targeted by ref.
|
// Abort completely cancels the ingest operation targeted by ref.
|
||||||
//
|
|
||||||
// TODO(stevvooe): Same consideration as above. This should really be
|
|
||||||
// restricted to an ingest management interface.
|
|
||||||
Abort(ctx context.Context, ref string) error
|
Abort(ctx context.Context, ref string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type Writer interface {
|
type Writer interface {
|
||||||
io.WriteCloser
|
oci.BlobWriter
|
||||||
Status() (Status, error)
|
Status() (Status, error)
|
||||||
Digest() digest.Digest
|
|
||||||
Commit(size int64, expected digest.Digest) error
|
|
||||||
Truncate(size int64) error
|
Truncate(size int64) error
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -103,18 +87,7 @@ type Writer interface {
|
|||||||
// are commonly provided by complete implementations.
|
// are commonly provided by complete implementations.
|
||||||
type Store interface {
|
type Store interface {
|
||||||
Manager
|
Manager
|
||||||
Ingester
|
|
||||||
Provider
|
Provider
|
||||||
}
|
IngestManager
|
||||||
|
Ingester
|
||||||
func IsNotFound(err error) bool {
|
|
||||||
return errors.Cause(err) == ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsExists(err error) bool {
|
|
||||||
return errors.Cause(err) == ErrExists
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsLocked(err error) bool {
|
|
||||||
return errors.Cause(err) == ErrLocked
|
|
||||||
}
|
}
|
||||||
|
19
vendor/github.com/containerd/containerd/content/helpers.go
generated
vendored
19
vendor/github.com/containerd/containerd/content/helpers.go
generated
vendored
@ -5,11 +5,21 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bufPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return make([]byte, 1<<20)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
// ReadBlob retrieves the entire contents of the blob from the provider.
|
// ReadBlob retrieves the entire contents of the blob from the provider.
|
||||||
//
|
//
|
||||||
// Avoid using this for large blobs, such as layers.
|
// Avoid using this for large blobs, such as layers.
|
||||||
@ -33,7 +43,7 @@ func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byt
|
|||||||
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest) error {
|
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest) error {
|
||||||
cw, err := cs.Writer(ctx, ref, size, expected)
|
cw, err := cs.Writer(ctx, ref, size, expected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !IsExists(err) {
|
if !errdefs.IsAlreadyExists(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -79,7 +89,7 @@ func Copy(cw Writer, r io.Reader, size int64, expected digest.Digest) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := cw.Commit(size, expected); err != nil {
|
if err := cw.Commit(size, expected); err != nil {
|
||||||
if !IsExists(err) {
|
if !errdefs.IsAlreadyExists(err) {
|
||||||
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
|
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -120,8 +130,3 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
|
|||||||
|
|
||||||
return r, errors.Wrapf(errUnseekable, "seek to offset %v failed", offset)
|
return r, errors.Wrapf(errUnseekable, "seek to offset %v failed", offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
func readFileString(path string) (string, error) {
|
|
||||||
p, err := ioutil.ReadFile(path)
|
|
||||||
return string(p), err
|
|
||||||
}
|
|
||||||
|
37
vendor/github.com/containerd/containerd/content/locks.go
generated
vendored
37
vendor/github.com/containerd/containerd/content/locks.go
generated
vendored
@ -1,37 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Handles locking references
|
|
||||||
// TODO: use boltdb for lock status
|
|
||||||
|
|
||||||
var (
|
|
||||||
// locks lets us lock in process
|
|
||||||
locks = map[string]struct{}{}
|
|
||||||
locksMu sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
func tryLock(ref string) error {
|
|
||||||
locksMu.Lock()
|
|
||||||
defer locksMu.Unlock()
|
|
||||||
|
|
||||||
if _, ok := locks[ref]; ok {
|
|
||||||
return errors.Wrapf(ErrLocked, "key %s is locked", ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
locks[ref] = struct{}{}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unlock(ref string) {
|
|
||||||
locksMu.Lock()
|
|
||||||
defer locksMu.Unlock()
|
|
||||||
|
|
||||||
if _, ok := locks[ref]; ok {
|
|
||||||
delete(locks, ref)
|
|
||||||
}
|
|
||||||
}
|
|
26
vendor/github.com/containerd/containerd/content/readerat.go
generated
vendored
26
vendor/github.com/containerd/containerd/content/readerat.go
generated
vendored
@ -1,26 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// readerat implements io.ReaderAt in a completely stateless manner by opening
|
|
||||||
// the referenced file for each call to ReadAt.
|
|
||||||
type readerAt struct {
|
|
||||||
f string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra readerAt) ReadAt(p []byte, offset int64) (int, error) {
|
|
||||||
fp, err := os.Open(ra.f)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
if _, err := fp.Seek(offset, io.SeekStart); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fp.Read(p)
|
|
||||||
}
|
|
358
vendor/github.com/containerd/containerd/content/store.go
generated
vendored
358
vendor/github.com/containerd/containerd/content/store.go
generated
vendored
@ -1,358 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Store is digest-keyed store for content. All data written into the store is
|
|
||||||
// stored under a verifiable digest.
|
|
||||||
//
|
|
||||||
// Store can generally support multi-reader, single-writer ingest of data,
|
|
||||||
// including resumable ingest.
|
|
||||||
type store struct {
|
|
||||||
root string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewStore(root string) (Store, error) {
|
|
||||||
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &store{
|
|
||||||
root: root,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) {
|
|
||||||
p := s.blobPath(dgst)
|
|
||||||
fi, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return Info{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.info(dgst, fi), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) info(dgst digest.Digest, fi os.FileInfo) Info {
|
|
||||||
return Info{
|
|
||||||
Digest: dgst,
|
|
||||||
Size: fi.Size(),
|
|
||||||
CommittedAt: fi.ModTime(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader returns an io.ReadCloser for the blob.
|
|
||||||
func (s *store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
|
||||||
fp, err := os.Open(s.blobPath(dgst))
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = ErrNotFound
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReaderAt returns an io.ReaderAt for the blob.
|
|
||||||
func (s *store) ReaderAt(ctx context.Context, dgst digest.Digest) (io.ReaderAt, error) {
|
|
||||||
return readerAt{f: s.blobPath(dgst)}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a blob by its digest.
|
|
||||||
//
|
|
||||||
// While this is safe to do concurrently, safe exist-removal logic must hold
|
|
||||||
// some global lock on the store.
|
|
||||||
func (cs *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): Allow querying the set of blobs in the blob store.
|
|
||||||
|
|
||||||
func (cs *store) Walk(ctx context.Context, fn WalkFunc) error {
|
|
||||||
root := filepath.Join(cs.root, "blobs")
|
|
||||||
var alg digest.Algorithm
|
|
||||||
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !fi.IsDir() && !alg.Available() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): There are few more cases with subdirs that should be
|
|
||||||
// handled in case the layout gets corrupted. This isn't strict enough
|
|
||||||
// an may spew bad data.
|
|
||||||
|
|
||||||
if path == root {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if filepath.Dir(path) == root {
|
|
||||||
alg = digest.Algorithm(filepath.Base(path))
|
|
||||||
|
|
||||||
if !alg.Available() {
|
|
||||||
alg = ""
|
|
||||||
return filepath.SkipDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// descending into a hash directory
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
// log error but don't report
|
|
||||||
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
|
|
||||||
// if we see this, it could mean some sort of corruption of the
|
|
||||||
// store or extra paths not expected previously.
|
|
||||||
}
|
|
||||||
|
|
||||||
return fn(cs.info(dgst, fi))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Status(ctx context.Context, re string) ([]Status, error) {
|
|
||||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
fis, err := fp.Readdir(-1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rec, err := regexp.Compile(re)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var active []Status
|
|
||||||
for _, fi := range fis {
|
|
||||||
p := filepath.Join(s.root, "ingest", fi.Name())
|
|
||||||
stat, err := s.status(p)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): This is a common error if uploads are being
|
|
||||||
// completed while making this listing. Need to consider taking a
|
|
||||||
// lock on the whole store to coordinate this aspect.
|
|
||||||
//
|
|
||||||
// Another option is to cleanup downloads asynchronously and
|
|
||||||
// coordinate this method with the cleanup process.
|
|
||||||
//
|
|
||||||
// For now, we just skip them, as they really don't exist.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !rec.MatchString(stat.Ref) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
active = append(active, stat)
|
|
||||||
}
|
|
||||||
|
|
||||||
return active, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// status works like stat above except uses the path to the ingest.
|
|
||||||
func (s *store) status(ingestPath string) (Status, error) {
|
|
||||||
dp := filepath.Join(ingestPath, "data")
|
|
||||||
fi, err := os.Stat(dp)
|
|
||||||
if err != nil {
|
|
||||||
return Status{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ref, err := readFileString(filepath.Join(ingestPath, "ref"))
|
|
||||||
if err != nil {
|
|
||||||
return Status{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return Status{
|
|
||||||
Ref: ref,
|
|
||||||
Offset: fi.Size(),
|
|
||||||
Total: s.total(ingestPath),
|
|
||||||
UpdatedAt: fi.ModTime(),
|
|
||||||
StartedAt: getStartTime(fi),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// total attempts to resolve the total expected size for the write.
|
|
||||||
func (s *store) total(ingestPath string) int64 {
|
|
||||||
totalS, err := readFileString(filepath.Join(ingestPath, "total"))
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
total, err := strconv.ParseInt(totalS, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
// represents a corrupted file, should probably remove.
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return total
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer begins or resumes the active writer identified by ref. If the writer
|
|
||||||
// is already in use, an error is returned. Only one writer may be in use per
|
|
||||||
// ref at a time.
|
|
||||||
//
|
|
||||||
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
|
|
||||||
func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
|
|
||||||
// TODO(stevvooe): Need to actually store and handle expected here. We have
|
|
||||||
// code in the service that shouldn't be dealing with this.
|
|
||||||
|
|
||||||
path, refp, data := s.ingestPaths(ref)
|
|
||||||
|
|
||||||
if err := tryLock(ref); err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "locking %v failed", ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
digester = digest.Canonical.Digester()
|
|
||||||
offset int64
|
|
||||||
startedAt time.Time
|
|
||||||
updatedAt time.Time
|
|
||||||
)
|
|
||||||
|
|
||||||
// ensure that the ingest path has been created.
|
|
||||||
if err := os.Mkdir(path, 0755); err != nil {
|
|
||||||
if !os.IsExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
status, err := s.status(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed reading status of resume write")
|
|
||||||
}
|
|
||||||
|
|
||||||
if ref != status.Ref {
|
|
||||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
|
||||||
// layout corruption or a hash collision for the ref key.
|
|
||||||
return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
if total > 0 && status.Total > 0 && total != status.Total {
|
|
||||||
return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
|
||||||
}
|
|
||||||
|
|
||||||
// slow slow slow!!, send to goroutine or use resumable hashes
|
|
||||||
fp, err := os.Open(data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
p := bufPool.Get().([]byte)
|
|
||||||
defer bufPool.Put(p)
|
|
||||||
|
|
||||||
offset, err = io.CopyBuffer(digester.Hash(), fp, p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
updatedAt = status.UpdatedAt
|
|
||||||
startedAt = status.StartedAt
|
|
||||||
total = status.Total
|
|
||||||
} else {
|
|
||||||
// the ingest is new, we need to setup the target location.
|
|
||||||
// write the ref to a file for later use
|
|
||||||
if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if total > 0 {
|
|
||||||
if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
startedAt = time.Now()
|
|
||||||
updatedAt = startedAt
|
|
||||||
}
|
|
||||||
|
|
||||||
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed to open data file")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &writer{
|
|
||||||
s: s,
|
|
||||||
fp: fp,
|
|
||||||
ref: ref,
|
|
||||||
path: path,
|
|
||||||
offset: offset,
|
|
||||||
total: total,
|
|
||||||
digester: digester,
|
|
||||||
startedAt: startedAt,
|
|
||||||
updatedAt: updatedAt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Abort an active transaction keyed by ref. If the ingest is active, it will
|
|
||||||
// be cancelled. Any resources associated with the ingest will be cleaned.
|
|
||||||
func (s *store) Abort(ctx context.Context, ref string) error {
|
|
||||||
root := s.ingestRoot(ref)
|
|
||||||
if err := os.RemoveAll(root); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cs *store) blobPath(dgst digest.Digest) string {
|
|
||||||
return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) ingestRoot(ref string) string {
|
|
||||||
dgst := digest.FromString(ref)
|
|
||||||
return filepath.Join(s.root, "ingest", dgst.Hex())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ingestPaths are returned. The paths are the following:
|
|
||||||
//
|
|
||||||
// - root: entire ingest directory
|
|
||||||
// - ref: name of the starting ref, must be unique
|
|
||||||
// - data: file where data is written
|
|
||||||
//
|
|
||||||
func (s *store) ingestPaths(ref string) (string, string, string) {
|
|
||||||
var (
|
|
||||||
fp = s.ingestRoot(ref)
|
|
||||||
rp = filepath.Join(fp, "ref")
|
|
||||||
dp = filepath.Join(fp, "data")
|
|
||||||
)
|
|
||||||
|
|
||||||
return fp, rp, dp
|
|
||||||
}
|
|
15
vendor/github.com/containerd/containerd/content/store_linux.go
generated
vendored
15
vendor/github.com/containerd/containerd/content/store_linux.go
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getStartTime(fi os.FileInfo) time.Time {
|
|
||||||
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
|
|
||||||
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
17
vendor/github.com/containerd/containerd/content/store_unix.go
generated
vendored
17
vendor/github.com/containerd/containerd/content/store_unix.go
generated
vendored
@ -1,17 +0,0 @@
|
|||||||
// +build darwin freebsd
|
|
||||||
|
|
||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getStartTime(fi os.FileInfo) time.Time {
|
|
||||||
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
|
|
||||||
return time.Unix(int64(st.Ctimespec.Sec), int64(st.Ctimespec.Nsec))
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
10
vendor/github.com/containerd/containerd/content/store_windows.go
generated
vendored
10
vendor/github.com/containerd/containerd/content/store_windows.go
generated
vendored
@ -1,10 +0,0 @@
|
|||||||
package content
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getStartTime(fi os.FileInfo) time.Time {
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
140
vendor/github.com/containerd/containerd/content/writer.go
generated
vendored
140
vendor/github.com/containerd/containerd/content/writer.go
generated
vendored
@ -1,140 +0,0 @@
package content

import (
	"os"
	"path/filepath"
	"time"

	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// writer represents a write transaction against the blob store.
type writer struct {
	s         *store
	fp        *os.File // opened data file
	path      string   // path to writer dir
	ref       string   // ref key
	offset    int64
	total     int64
	digester  digest.Digester
	startedAt time.Time
	updatedAt time.Time
}

func (w *writer) Status() (Status, error) {
	return Status{
		Ref:       w.ref,
		Offset:    w.offset,
		Total:     w.total,
		StartedAt: w.startedAt,
		UpdatedAt: w.updatedAt,
	}, nil
}

// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *writer) Digest() digest.Digest {
	return w.digester.Digest()
}

// Write p to the transaction.
//
// Note that writes are unbuffered to the backing file. When writing, it is
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
func (w *writer) Write(p []byte) (n int, err error) {
	n, err = w.fp.Write(p)
	w.digester.Hash().Write(p[:n])
	w.offset += int64(len(p))
	w.updatedAt = time.Now()
	return n, err
}

func (w *writer) Commit(size int64, expected digest.Digest) error {
	if err := w.fp.Sync(); err != nil {
		return errors.Wrap(err, "sync failed")
	}

	fi, err := w.fp.Stat()
	if err != nil {
		return errors.Wrap(err, "stat on ingest file failed")
	}

	// change to readonly, more important for read, but provides _some_
	// protection from this point on. We use the existing perms with a mask
	// only allowing reads honoring the umask on creation.
	//
	// This removes write and exec, only allowing read per the creation umask.
	if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
		return errors.Wrap(err, "failed to change ingest file permissions")
	}

	if size > 0 && size != fi.Size() {
		return errors.Errorf("%q failed size validation: %v != %v", w.ref, fi.Size(), size)
	}

	if err := w.fp.Close(); err != nil {
		return errors.Wrap(err, "failed closing ingest")
	}

	dgst := w.digester.Digest()
	if expected != "" && expected != dgst {
		return errors.Errorf("unexpected digest: %v != %v", dgst, expected)
	}

	var (
		ingest = filepath.Join(w.path, "data")
		target = w.s.blobPath(dgst)
	)

	// make sure parent directories of blob exist
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		return err
	}

	// clean up!!
	defer os.RemoveAll(w.path)

	if err := os.Rename(ingest, target); err != nil {
		if os.IsExist(err) {
			// collision with the target file!
			return ErrExists
		}
		return err
	}

	unlock(w.ref)
	w.fp = nil

	return nil
}

// Close the writer, flushing any unwritten data and leaving the progress
// intact.
//
// If one needs to resume the transaction, a new writer can be obtained from
// `ContentStore.Resume` using the same key. The write can then be continued
// from where it was left off.
//
// To abandon a transaction completely, first call close then `Store.Remove` to
// clean up the associated resources.
func (cw *writer) Close() (err error) {
	unlock(cw.ref)

	if cw.fp != nil {
		cw.fp.Sync()
		return cw.fp.Close()
	}

	return nil
}

func (w *writer) Truncate(size int64) error {
	if size != 0 {
		return errors.New("Truncate: unsupported size")
	}
	w.offset = 0
	w.digester.Hash().Reset()
	return w.fp.Truncate(0)
}
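Reviewer note: the deleted writer above implements an ingest-then-commit transaction, and its Write doc comment recommends buffering. Below is a minimal, self-contained sketch of how such a writer is driven; the ingestWriter interface and putBlob helper are illustrative names declared only so the sketch compiles, not part of the vendored API.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	digest "github.com/opencontainers/go-digest"
)

// ingestWriter mirrors the method set of the unexported writer in the deleted
// file above; it is declared here only so the sketch is self-contained.
type ingestWriter interface {
	io.Writer
	Digest() digest.Digest
	Commit(size int64, expected digest.Digest) error
	Close() error
}

// putBlob copies r into w through a bufio.Writer, as the Write doc comment
// recommends, then commits against the expected size and digest.
func putBlob(w ingestWriter, r io.Reader, size int64, expected digest.Digest) error {
	bw := bufio.NewWriter(w)
	if _, err := io.Copy(bw, r); err != nil {
		w.Close() // keep partial progress so the ref can be resumed later
		return err
	}
	if err := bw.Flush(); err != nil {
		w.Close()
		return err
	}
	return w.Commit(size, expected)
}

func main() {
	blob := []byte("some layer bytes")
	// The size and digest a caller would pass to Commit via putBlob.
	fmt.Println(int64(len(blob)), digest.FromBytes(blob))
	_ = bytes.NewReader(blob) // a real call: putBlob(w, bytes.NewReader(blob), ...)
}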
54  vendor/github.com/containerd/containerd/errdefs/errors.go  generated  vendored  Normal file
@ -0,0 +1,54 @@
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and errors.Wrapf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs

import "github.com/pkg/errors"

// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these error classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
	ErrInvalidArgument    = errors.New("invalid argument")
	ErrNotFound           = errors.New("not found")
	ErrAlreadyExists      = errors.New("already exists")
	ErrFailedPrecondition = errors.New("failed precondition")
	ErrUnavailable        = errors.New("unavailable")
)

// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
	return errors.Cause(err) == ErrInvalidArgument
}

// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
	return errors.Cause(err) == ErrNotFound
}

// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
	return errors.Cause(err) == ErrAlreadyExists
}

// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition
func IsFailedPrecondition(err error) bool {
	return errors.Cause(err) == ErrFailedPrecondition
}

// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
	return errors.Cause(err) == ErrUnavailable
}
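Reviewer note: the package comment above describes the intended usage pattern (wrap one of the sentinel errors, then test the cause with the IsXXX helpers). A small self-contained sketch follows; lookupImage is an illustrative stand-in, not a containerd function.

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// lookupImage stands in for any containerd-style API call: it returns a
// wrapped errdefs class so callers can test the cause rather than the text.
func lookupImage(name string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
	err := lookupImage("docker.io/library/busybox:latest")
	switch {
	case errdefs.IsNotFound(err):
		fmt.Println("not found:", err) // errors.Cause unwraps to ErrNotFound
	case errdefs.IsInvalidArgument(err):
		fmt.Println("bad request:", err)
	default:
		fmt.Println("unknown failure:", err)
	}
}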
106  vendor/github.com/containerd/containerd/errdefs/grpc.go  generated  vendored  Normal file
@ -0,0 +1,106 @@
package errdefs

import (
	"strings"

	"github.com/pkg/errors"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
	if err == nil {
		return nil
	}

	if isGRPCError(err) {
		// error has already been mapped to grpc
		return err
	}

	switch {
	case IsInvalidArgument(err):
		return grpc.Errorf(codes.InvalidArgument, err.Error())
	case IsNotFound(err):
		return grpc.Errorf(codes.NotFound, err.Error())
	case IsAlreadyExists(err):
		return grpc.Errorf(codes.AlreadyExists, err.Error())
	case IsFailedPrecondition(err):
		return grpc.Errorf(codes.FailedPrecondition, err.Error())
	case IsUnavailable(err):
		return grpc.Errorf(codes.Unavailable, err.Error())
	}

	return err
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
	return ToGRPC(errors.Wrapf(err, format, args...))
}

// FromGRPC maps a grpc error back into one of the error classes defined above,
// keeping the remaining message as context.
func FromGRPC(err error) error {
	if err == nil {
		return nil
	}

	var cls error // divide these into error classes, becomes the cause

	switch grpc.Code(err) {
	case codes.InvalidArgument:
		cls = ErrInvalidArgument
	case codes.AlreadyExists:
		cls = ErrAlreadyExists
	case codes.NotFound:
		cls = ErrNotFound
	case codes.Unavailable:
		cls = ErrUnavailable
	case codes.FailedPrecondition:
		cls = ErrFailedPrecondition
	default:
		cls = ErrUnknown
	}

	if cls != nil {
		msg := rebaseMessage(cls, err)
		if msg != "" {
			err = errors.Wrap(cls, msg)
		} else {
			err = cls
		}
	}

	return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
	desc := grpc.ErrorDesc(err)
	clss := cls.Error()
	if desc == clss {
		return ""
	}

	return strings.TrimSuffix(desc, ": "+clss)
}

func isGRPCError(err error) bool {
	_, ok := status.FromError(err)
	return ok
}
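Reviewer note: a short sketch of the server/client round trip the comments above describe, using only the functions added in this file plus the errdefs sentinels.

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Server side: wrap an errdefs class with context, then map it to a grpc
	// status error before returning it over the wire.
	cause := errors.Wrap(errdefs.ErrNotFound, "container abc123")
	wire := errdefs.ToGRPC(cause)

	// Client side: map the grpc error back into an errdefs class so the
	// IsXXX helpers keep working after the round trip.
	local := errdefs.FromGRPC(wire)
	fmt.Println(errdefs.IsNotFound(local)) // true
}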
24  vendor/github.com/containerd/containerd/events/events.go  generated  vendored  Normal file
@ -0,0 +1,24 @@
package events

import (
	"context"

	events "github.com/containerd/containerd/api/services/events/v1"
)

// Event is a generic interface for an event payload published on a topic.
type Event interface{}

// Publisher posts the event.
type Publisher interface {
	Publish(ctx context.Context, topic string, event Event) error
}

// Forwarder accepts an already packaged event envelope for redistribution.
type Forwarder interface {
	Forward(ctx context.Context, envelope *events.Envelope) error
}

type publisherFunc func(ctx context.Context, topic string, event Event) error

func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error {
	return fn(ctx, topic, event)
}
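Reviewer note: publisherFunc is the usual function-adapter idiom (compare net/http's HandlerFunc). Below is a self-contained sketch of the same idea with illustrative, exported names that are not part of the vendored package.

package main

import (
	"context"
	"fmt"
)

// Event and Publisher mirror the declarations above so the sketch compiles on
// its own.
type Event interface{}

type Publisher interface {
	Publish(ctx context.Context, topic string, event Event) error
}

// PublisherFunc adapts a plain function into a Publisher, the same way the
// unexported publisherFunc in the diff does.
type PublisherFunc func(ctx context.Context, topic string, event Event) error

func (fn PublisherFunc) Publish(ctx context.Context, topic string, event Event) error {
	return fn(ctx, topic, event)
}

func main() {
	var p Publisher = PublisherFunc(func(ctx context.Context, topic string, event Event) error {
		fmt.Printf("publish %s: %v\n", topic, event)
		return nil
	})
	_ = p.Publish(context.Background(), "/tasks/start", "demo payload")
}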
163  vendor/github.com/containerd/containerd/events/exchange.go  generated  vendored  Normal file
@ -0,0 +1,163 @@
package events

import (
	"context"
	"strings"
	"time"

	events "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/identifiers"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/typeurl"
	goevents "github.com/docker/go-events"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Exchange broadcasts events to all current subscribers.
type Exchange struct {
	broadcaster *goevents.Broadcaster
}

// NewExchange returns an Exchange ready for use.
func NewExchange() *Exchange {
	return &Exchange{
		broadcaster: goevents.NewBroadcaster(),
	}
}

// Forward accepts an envelope to be directly distributed on the exchange.
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) error {
	log.G(ctx).WithFields(logrus.Fields{
		"topic": envelope.Topic,
		"ns":    envelope.Namespace,
		"type":  envelope.Event.TypeUrl,
	}).Debug("forward event")

	if err := namespaces.Validate(envelope.Namespace); err != nil {
		return errors.Wrapf(err, "event envelope has invalid namespace")
	}

	if err := validateTopic(envelope.Topic); err != nil {
		return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
	}

	return e.broadcaster.Write(envelope)
}

// Publish packages and sends an event. The caller will be considered the
// initial publisher of the event. This means the timestamp will be calculated
// at this point and this method may read from the calling context.
func (e *Exchange) Publish(ctx context.Context, topic string, event Event) error {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed publishing event")
	}
	if err := validateTopic(topic); err != nil {
		return errors.Wrapf(err, "envelope topic %q", topic)
	}

	evany, err := typeurl.MarshalAny(event)
	if err != nil {
		return err
	}
	env := events.Envelope{
		Timestamp: time.Now().UTC(),
		Topic:     topic,
		Event:     evany,
	}
	if err := e.broadcaster.Write(&env); err != nil {
		return err
	}

	log.G(ctx).WithFields(logrus.Fields{
		"topic": topic,
		"type":  evany.TypeUrl,
		"ns":    namespace,
	}).Debug("published event")
	return nil
}

// Subscribe to events on the exchange. Events are sent through the returned
// channel ch. If an error is encountered, it will be sent on channel errs and
// errs will be closed. To end the subscription, cancel the provided context.
func (e *Exchange) Subscribe(ctx context.Context, filters ...filters.Filter) (ch <-chan *events.Envelope, errs <-chan error) {
	var (
		evch    = make(chan *events.Envelope)
		errq    = make(chan error, 1)
		channel = goevents.NewChannel(0)
		queue   = goevents.NewQueue(channel)
	)

	// TODO(stevvooe): Insert the filter!

	e.broadcaster.Add(queue)

	go func() {
		defer close(errq)
		defer e.broadcaster.Remove(queue)
		defer queue.Close()
		defer channel.Close()

		var err error
	loop:
		for {
			select {
			case ev := <-channel.C:
				env, ok := ev.(*events.Envelope)
				if !ok {
					// TODO(stevvooe): For the most part, we are well protected
					// from this condition. Both Forward and Publish protect
					// from this.
					err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
					break
				}

				select {
				case evch <- env:
				case <-ctx.Done():
					break loop
				}
			case <-ctx.Done():
				break loop
			}
		}

		if err == nil {
			if cerr := ctx.Err(); cerr != context.Canceled {
				err = cerr
			}
		}

		errq <- err
	}()

	ch = evch
	errs = errq

	return
}

func validateTopic(topic string) error {
	if topic == "" {
		return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
	}

	if topic[0] != '/' {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "topic %q must start with '/'", topic)
	}

	if len(topic) == 1 {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "topic %q must have at least one component", topic)
	}

	components := strings.Split(topic[1:], "/")
	for _, component := range components {
		if err := identifiers.Validate(component); err != nil {
			return errors.Wrapf(err, "failed validation on component %q", component)
		}
	}

	return nil
}
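Reviewer note: the Publish and Subscribe comments above describe the intended flow; the sketch below wires them together. It assumes the vendored namespaces.WithNamespace and typeurl.Register helpers behave as in current containerd (both calls are assumptions, they are not shown in this diff), and demoEvent is an illustrative payload type.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/typeurl"
)

// demoEvent is an illustrative payload; typeurl needs to know how to marshal
// it, hence the Register call below (an assumption about the vendored typeurl API).
type demoEvent struct {
	Message string
}

func init() {
	typeurl.Register(&demoEvent{}, "example.com/demo.Event")
}

func main() {
	exchange := events.NewExchange()

	// Publish requires a namespace on the context; Subscribe ends when the
	// context is cancelled, after which the errs channel is closed.
	ctx, cancel := context.WithCancel(namespaces.WithNamespace(context.Background(), "default"))
	defer cancel()

	ch, errs := exchange.Subscribe(ctx)

	go func() {
		if err := exchange.Publish(ctx, "/demo/started", &demoEvent{Message: "hello"}); err != nil {
			fmt.Println("publish failed:", err)
		}
	}()

	select {
	case env := <-ch:
		fmt.Println("received", env.Topic, env.Event.TypeUrl)
	case err := <-errs:
		fmt.Println("subscription error:", err)
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for event")
	}

	cancel() // end the subscription
	<-errs   // wait for the subscriber goroutine to exit
}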
Some files were not shown because too many files have changed in this diff.